branching: merge default into stable

author:    Raphaël Gomès <rgomes@octobus.net>
date:      Thu, 02 Mar 2023 22:45:44 +0100
branch:    stable
tag:       6.4rc0
changeset: 50269:05de4896508e
parent:    50250:1ded5b48b8aa
parent:    50268:ffdfb1066ac6
child:     50270:c15c149ae0dd
--- a/.gitlab/merge_request_templates/Default.md	Thu Mar 02 15:21:36 2023 +0100
+++ b/.gitlab/merge_request_templates/Default.md	Thu Mar 02 22:45:44 2023 +0100
@@ -1,5 +1,8 @@
 /assign_reviewer @mercurial.review
 
+
+<!--
+
 Welcome to the Mercurial Merge Request creation process:
 
 * Set a simple title for your MR,
@@ -11,3 +14,5 @@
 
 * https://www.mercurial-scm.org/wiki/ContributingChanges
 * https://www.mercurial-scm.org/wiki/Heptapod
+
+-->
--- a/Makefile	Thu Mar 02 15:21:36 2023 +0100
+++ b/Makefile	Thu Mar 02 22:45:44 2023 +0100
@@ -138,6 +138,7 @@
         # Run Rust tests if cargo is installed
 	if command -v $(CARGO) >/dev/null 2>&1; then \
 		$(MAKE) rust-tests; \
+		$(MAKE) cargo-clippy; \
 	fi
 	cd tests && $(PYTHON) run-tests.py $(TESTFLAGS)
 
@@ -152,9 +153,13 @@
 	cd tests && $(HGPYTHONS)/$*/bin/python run-tests.py $(TESTFLAGS)
 
 rust-tests:
-	cd $(HGROOT)/rust/hg-cpython \
+	cd $(HGROOT)/rust \
 		&& $(CARGO) test --quiet --all --features "$(HG_RUST_FEATURES)"
 
+cargo-clippy:
+	cd $(HGROOT)/rust \
+		&& $(CARGO) clippy --all --features "$(HG_RUST_FEATURES)" -- -D warnings
+
 check-code:
 	hg manifest | xargs python contrib/check-code.py
 
--- a/contrib/check-code.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/contrib/check-code.py	Thu Mar 02 22:45:44 2023 +0100
@@ -372,10 +372,6 @@
         ),
         (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', "wrong whitespace around ="),
         (
-            r'\([^()]*( =[^=]|[^<>!=]= )',
-            "no whitespace around = for named parameters",
-        ),
-        (
             r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
             "don't use old-style two-argument raise, use Exception(message)",
         ),
--- a/contrib/check-pytype.sh	Thu Mar 02 15:21:36 2023 +0100
+++ b/contrib/check-pytype.sh	Thu Mar 02 22:45:44 2023 +0100
@@ -12,6 +12,36 @@
 # endeavor to empty this list out over time, as some of these are
 # probably hiding real problems.
 #
+# hgext/absorb.py               # [attribute-error]
+# hgext/bugzilla.py             # [pyi-error], [attribute-error]
+# hgext/convert/bzr.py          # [attribute-error]
+# hgext/convert/cvs.py          # [attribute-error], [wrong-arg-types]
+# hgext/convert/cvsps.py        # [attribute-error]
+# hgext/convert/p4.py           # [wrong-arg-types] (__file: mercurial.utils.procutil._pfile -> IO)
+# hgext/convert/subversion.py   # [attribute-error], [name-error], [pyi-error]
+# hgext/fastannotate/context.py # no linelog.copyfrom()
+# hgext/fastannotate/formatter.py  # [unsupported-operands]
+# hgext/fsmonitor/__init__.py   # [name-error]
+# hgext/git/__init__.py         # [attribute-error]
+# hgext/githelp.py              # [attribute-error] [wrong-arg-types]
+# hgext/hgk.py                  # [attribute-error]
+# hgext/histedit.py             # [attribute-error], [wrong-arg-types]
+# hgext/infinitepush            # using bytes for str literal; scheduled for removal
+# hgext/keyword.py              # [attribute-error]
+# hgext/largefiles/storefactory.py  # [attribute-error]
+# hgext/lfs/__init__.py         # [attribute-error]
+# hgext/narrow/narrowbundle2.py # [attribute-error]
+# hgext/narrow/narrowcommands.py    # [attribute-error], [name-error]
+# hgext/rebase.py               # [attribute-error]
+# hgext/remotefilelog/basepack.py   # [attribute-error], [wrong-arg-count]
+# hgext/remotefilelog/basestore.py  # [attribute-error]
+# hgext/remotefilelog/contentstore.py   # [missing-parameter], [wrong-keyword-args], [attribute-error]
+# hgext/remotefilelog/fileserverclient.py  # [attribute-error]
+# hgext/remotefilelog/shallowbundle.py     # [attribute-error]
+# hgext/remotefilelog/remotefilectx.py  # [module-attr] (This is an actual bug)
+# hgext/sqlitestore.py          # [attribute-error]
+# hgext/zeroconf/__init__.py    # bytes vs str; tests fail on macOS
+#
 # mercurial/bundlerepo.py       # no vfs and ui attrs on bundlerepo
 # mercurial/context.py          # many [attribute-error]
 # mercurial/crecord.py          # tons of [attribute-error], [module-attr]
@@ -31,7 +61,6 @@
 # mercurial/pure/parsers.py     # [attribute-error]
 # mercurial/repoview.py         # [attribute-error]
 # mercurial/testing/storage.py  # tons of [attribute-error]
-# mercurial/ui.py               # [attribute-error], [wrong-arg-types]
 # mercurial/unionrepo.py        # ui, svfs, unfiltered [attribute-error]
 # mercurial/win32.py            # [not-callable]
 # mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
@@ -43,7 +72,37 @@
 
 # TODO: include hgext and hgext3rd
 
-pytype -V 3.7 --keep-going --jobs auto mercurial \
+pytype -V 3.7 --keep-going --jobs auto \
+    doc/check-seclevel.py hgdemandimport hgext mercurial \
+    -x hgext/absorb.py \
+    -x hgext/bugzilla.py \
+    -x hgext/convert/bzr.py \
+    -x hgext/convert/cvs.py \
+    -x hgext/convert/cvsps.py \
+    -x hgext/convert/p4.py \
+    -x hgext/convert/subversion.py \
+    -x hgext/fastannotate/context.py \
+    -x hgext/fastannotate/formatter.py \
+    -x hgext/fsmonitor/__init__.py \
+    -x hgext/git/__init__.py \
+    -x hgext/githelp.py \
+    -x hgext/hgk.py \
+    -x hgext/histedit.py \
+    -x hgext/infinitepush \
+    -x hgext/keyword.py \
+    -x hgext/largefiles/storefactory.py \
+    -x hgext/lfs/__init__.py \
+    -x hgext/narrow/narrowbundle2.py \
+    -x hgext/narrow/narrowcommands.py \
+    -x hgext/rebase.py \
+    -x hgext/remotefilelog/basepack.py \
+    -x hgext/remotefilelog/basestore.py \
+    -x hgext/remotefilelog/contentstore.py \
+    -x hgext/remotefilelog/fileserverclient.py \
+    -x hgext/remotefilelog/remotefilectx.py \
+    -x hgext/remotefilelog/shallowbundle.py \
+    -x hgext/sqlitestore.py \
+    -x hgext/zeroconf/__init__.py \
     -x mercurial/bundlerepo.py \
     -x mercurial/context.py \
     -x mercurial/crecord.py \
@@ -64,9 +123,11 @@
     -x mercurial/repoview.py \
     -x mercurial/testing/storage.py \
     -x mercurial/thirdparty \
-    -x mercurial/ui.py \
     -x mercurial/unionrepo.py \
     -x mercurial/win32.py \
     -x mercurial/wireprotoframing.py \
     -x mercurial/wireprotov1peer.py \
     -x mercurial/wireprotov1server.py
+
+echo 'pytype crashed while generating the following type stubs:'
+find .pytype/pyi -name '*.pyi' | xargs grep -l '# Caught error' | sort
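
Note: the new trailing lines surface pytype crashes by scanning the generated
stubs for crash markers. A rough Python equivalent of that shell pipeline,
assuming pytype has left its output under .pytype/pyi as the script expects:

    import pathlib

    # List every generated stub in which pytype recorded a crash with a
    # "# Caught error" comment, in sorted order like the shell version.
    crashed = sorted(
        str(stub)
        for stub in pathlib.Path(".pytype/pyi").rglob("*.pyi")
        if "# Caught error" in stub.read_text(errors="replace")
    )
    print("pytype crashed while generating the following type stubs:")
    print("\n".join(crashed))
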
--- a/contrib/fuzz/revlog.cc	Thu Mar 02 15:21:36 2023 +0100
+++ b/contrib/fuzz/revlog.cc	Thu Mar 02 22:45:44 2023 +0100
@@ -20,7 +20,7 @@
         index, cache = parsers.parse_index2(data, inline)
         index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
         index.stats()
-        index.findsnapshots({}, 0)
+        index.findsnapshots({}, 0, len(index) - 1)
         10 in index
         for rev in range(len(index)):
             index.reachableroots(0, [len(index)-1], [rev])
--- a/contrib/heptapod-ci.yml	Thu Mar 02 15:21:36 2023 +0100
+++ b/contrib/heptapod-ci.yml	Thu Mar 02 22:45:44 2023 +0100
@@ -42,6 +42,7 @@
     script:
         - echo "python used, $PYTHON"
         - make rust-tests
+        - make cargo-clippy
     variables:
         PYTHON: python3
         CI_CLEVER_CLOUD_FLAVOR: S
@@ -91,7 +92,8 @@
       - hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
       - cd /tmp/mercurial-ci/
       - make local PYTHON=$PYTHON
-      - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.03.29
+      - $PYTHON -m pip install --user -U libcst==0.3.20 pytype==2022.11.18
+      - ./contrib/setup-pytype.sh
     script:
       - echo "Entering script section"
       - sh contrib/check-pytype.sh
--- a/contrib/perf.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/contrib/perf.py	Thu Mar 02 22:45:44 2023 +0100
@@ -235,6 +235,7 @@
 
 cmdtable = {}
 
+
 # for "historical portability":
 # define parsealiases locally, because cmdutil.parsealiases has been
 # available since 1.5 (or 6252852b4332)
@@ -573,7 +574,6 @@
 
 
 def formatone(fm, timings, title=None, result=None, displayall=False):
-
     count = len(timings)
 
     fm.startitem()
@@ -815,7 +815,12 @@
             )
             sum(map(bool, s))
 
-        timer(status_dirstate)
+        if util.safehasattr(dirstate, 'running_status'):
+            with dirstate.running_status(repo):
+                timer(status_dirstate)
+                dirstate.invalidate()
+        else:
+            timer(status_dirstate)
     else:
         timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
     fm.end()
@@ -997,11 +1002,16 @@
     timer, fm = gettimer(ui, opts)
 
     try:
-        from mercurial.utils.urlutil import get_unique_pull_path
-
-        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
+        from mercurial.utils.urlutil import get_unique_pull_path_obj
+
+        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
     except ImportError:
-        path = ui.expandpath(path)
+        try:
+            from mercurial.utils.urlutil import get_unique_pull_path
+
+            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
+        except ImportError:
+            path = ui.expandpath(path)
 
     def s():
         repos[1] = hg.peer(ui, opts, path)
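
Note: contrib/perf.py is expected to run against many Mercurial versions, so
the hunk above probes for the newest path API first and falls back step by
step. The general shape of that ladder, sketched with hypothetical
module/function names (not Mercurial API):

    # Prefer the newest interface; older fallbacks are only reached when the
    # import fails, so the newest available one wins.
    try:
        from somelib import new_api as get_path  # hypothetical, newest API
    except ImportError:
        try:
            from somelib import old_api as get_path  # hypothetical, older API
        except ImportError:
            def get_path(value):
                # oldest fallback: pass the value through unchanged
                return value
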
@@ -1469,7 +1479,8 @@
     def d():
         ds.write(repo.currenttransaction())
 
-    timer(d, setup=setup)
+    with repo.wlock():
+        timer(d, setup=setup)
     fm.end()
 
 
@@ -1613,7 +1624,11 @@
             b'default repository not configured!',
             hint=b"see 'hg help config.paths'",
         )
-    dest = path.pushloc or path.loc
+    if util.safehasattr(path, 'main_path'):
+        path = path.get_push_variant()
+        dest = path.loc
+    else:
+        dest = path.pushloc or path.loc
     ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
     other = hg.peer(repo, opts, dest)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/setup-pytype.sh	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -e
+set -u
+
+# Find the python3 setup that would run pytype
+PYTYPE=`which pytype`
+PYTHON3=`head -n1 ${PYTYPE} | sed -s 's/#!//'`
+
+# Existing stubs that pytype processes live here
+TYPESHED=$(${PYTHON3} -c "import pytype; print(pytype.__path__[0])")/typeshed/stubs
+HG_STUBS=${TYPESHED}/mercurial
+
+echo "Patching typeshed at $HG_STUBS"
+
+rm -rf ${HG_STUBS}
+mkdir -p ${HG_STUBS}
+
+cat > ${HG_STUBS}/METADATA.toml <<EOF
+version = "0.1"
+EOF
+
+
+mkdir -p ${HG_STUBS}/mercurial/cext ${HG_STUBS}/mercurial/thirdparty/attr
+
+touch ${HG_STUBS}/mercurial/__init__.pyi
+touch ${HG_STUBS}/mercurial/cext/__init__.pyi
+touch ${HG_STUBS}/mercurial/thirdparty/__init__.pyi
+
+ln -sf $(hg root)/mercurial/cext/*.{pyi,typed} \
+       ${HG_STUBS}/mercurial/cext
+ln -sf $(hg root)/mercurial/thirdparty/attr/*.{pyi,typed} \
+       ${HG_STUBS}/mercurial/thirdparty/attr
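
Note: this script grafts Mercurial stub packages into the typeshed copy that
ships inside the installed pytype package. A minimal sketch of the TYPESHED
path lookup it performs, assuming pytype is importable in the current
interpreter:

    import os
    import pytype  # assumes pytype is installed, as the script requires

    # Same computation as the TYPESHED= line above: pytype bundles its own
    # typeshed copy inside the installed package directory.
    typeshed_stubs = os.path.join(pytype.__path__[0], "typeshed", "stubs")
    print(typeshed_stubs)
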
--- a/contrib/testparseutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/contrib/testparseutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -7,14 +7,12 @@
 
 
 import abc
+import builtins
 import re
-import sys
 
 ####################
 # for Python3 compatibility (almost comes from mercurial/pycompat.py)
 
-ispy3 = sys.version_info[0] >= 3
-
 
 def identity(a):
     return a
@@ -38,27 +36,19 @@
     return _rapply(f, xs)
 
 
-if ispy3:
-    import builtins
-
-    def bytestr(s):
-        # tiny version of pycompat.bytestr
-        return s.encode('latin1')
-
-    def sysstr(s):
-        if isinstance(s, builtins.str):
-            return s
-        return s.decode('latin-1')
-
-    def opentext(f):
-        return open(f, 'r')
+def bytestr(s):
+    # tiny version of pycompat.bytestr
+    return s.encode('latin1')
 
 
-else:
-    bytestr = str
-    sysstr = identity
+def sysstr(s):
+    if isinstance(s, builtins.str):
+        return s
+    return s.decode('latin-1')
 
-    opentext = open
+
+def opentext(f):
+    return open(f, 'r')
 
 
 def b2s(x):
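
Note: with Python 2 support gone, bytestr/sysstr reduce to plain latin-1
codecs. Because latin-1 maps bytes 0-255 one-to-one onto code points
U+0000..U+00FF, the pair round-trips losslessly; a quick self-contained check:

    import builtins

    def bytestr(s):
        # tiny version of pycompat.bytestr (as above)
        return s.encode('latin1')

    def sysstr(s):
        if isinstance(s, builtins.str):
            return s
        return s.decode('latin-1')

    # latin-1 is a bijection between bytes and U+0000..U+00FF:
    assert sysstr(bytestr("caf\xe9")) == "caf\xe9"
    assert bytestr(sysstr(b"\xff")) == b"\xff"
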
--- a/doc/check-seclevel.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/doc/check-seclevel.py	Thu Mar 02 22:45:44 2023 +0100
@@ -46,7 +46,7 @@
 
 
 def checkseclevel(ui, doc, name, initlevel):
-    ui.notenoi18n('checking "%s"\n' % name)
+    ui.notenoi18n(('checking "%s"\n' % name).encode('utf-8'))
     if not isinstance(doc, bytes):
         doc = doc.encode('utf-8')
     blocks, pruned = minirst.parse(doc, 0, ['verbose'])
@@ -70,14 +70,18 @@
         nextlevel = mark2level[mark]
         if curlevel < nextlevel and curlevel + 1 != nextlevel:
             ui.warnnoi18n(
-                'gap of section level at "%s" of %s\n' % (title, name)
+                ('gap of section level at "%s" of %s\n' % (title, name)).encode(
+                    'utf-8'
+                )
             )
             showavailables(ui, initlevel)
             errorcnt += 1
             continue
         ui.notenoi18n(
-            'appropriate section level for "%s %s"\n'
-            % (mark * (nextlevel * 2), title)
+            (
+                'appropriate section level for "%s %s"\n'
+                % (mark * (nextlevel * 2), title)
+            ).encode('utf-8')
         )
         curlevel = nextlevel
 
@@ -90,7 +94,9 @@
         name = k.split(b"|")[0].lstrip(b"^")
         if not entry[0].__doc__:
             ui.notenoi18n(
-                'skip checking %s: no help document\n' % (namefmt % name)
+                (
+                    'skip checking %s: no help document\n' % (namefmt % name)
+                ).encode('utf-8')
             )
             continue
         errorcnt += checkseclevel(
@@ -117,7 +123,9 @@
         mod = extensions.load(ui, name, None)
         if not mod.__doc__:
             ui.notenoi18n(
-                'skip checking %s extension: no help document\n' % name
+                (
+                    'skip checking %s extension: no help document\n' % name
+                ).encode('utf-8')
             )
             continue
         errorcnt += checkseclevel(
@@ -144,7 +152,9 @@
             doc = fp.read()
 
     ui.notenoi18n(
-        'checking input from %s with initlevel %d\n' % (filename, initlevel)
+        (
+            'checking input from %s with initlevel %d\n' % (filename, initlevel)
+        ).encode('utf-8')
     )
     return checkseclevel(ui, doc, 'input from %s' % filename, initlevel)
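
Note: every hunk in this file applies the same fix: the
ui.notenoi18n/ui.warnnoi18n methods expect bytes, while %-formatting here
produces str, so each message is formatted first and encoded once at the call
site. The pattern in isolation (hypothetical helper, for illustration only):

    def note(ui_note, fmt, *args):
        # format as str, then encode a single time at the boundary before
        # handing bytes to the ui method
        ui_note((fmt % args).encode('utf-8'))
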
 
--- a/hgdemandimport/demandimportpy3.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgdemandimport/demandimportpy3.py	Thu Mar 02 22:45:44 2023 +0100
@@ -23,8 +23,6 @@
   enabled.
 """
 
-# This line is unnecessary, but it satisfies test-check-py3-compat.t.
-
 import contextlib
 import importlib.util
 import sys
@@ -39,10 +37,16 @@
     the ignore list.
     """
 
+    _HAS_DYNAMIC_ATTRIBUTES = True  # help pytype not flag self.loader
+
     def exec_module(self, module):
         """Make the module load lazily."""
         with tracing.log('demandimport %s', module):
             if _deactivated or module.__name__ in ignores:
+                # Reset the loader on the module as super() does (issue6725)
+                module.__spec__.loader = self.loader
+                module.__loader__ = self.loader
+
                 self.loader.exec_module(module)
             else:
                 super().exec_module(module)
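
Note: for background, this lazy finder builds on the standard library's
lazy-loading machinery. A minimal sketch of that stdlib pattern (a
hypothetical standalone helper, not Mercurial's API), in which the module
body only executes on first attribute access:

    import importlib.util
    import sys

    def lazy_import(name):
        # Wrap the real loader so exec_module() defers execution until the
        # module's attributes are first touched.
        spec = importlib.util.find_spec(name)
        loader = importlib.util.LazyLoader(spec.loader)
        spec.loader = loader
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        loader.exec_module(module)
        return module

    json = lazy_import("json")  # module body not executed yet
    json.dumps({})              # first attribute access runs the import
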
--- a/hgext/absorb.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/absorb.py	Thu Mar 02 22:45:44 2023 +0100
@@ -881,7 +881,7 @@
 
             dirstate._fsmonitorstate.invalidate = noop
         try:
-            with dirstate.parentchange():
+            with dirstate.changing_parents(self.repo):
                 dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
         finally:
             restore()
--- a/hgext/amend.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/amend.py	Thu Mar 02 22:45:44 2023 +0100
@@ -46,6 +46,7 @@
             _(b'mark a branch as closed, hiding it from the branch list'),
         ),
         (b's', b'secret', None, _(b'use the secret phase for committing')),
+        (b'', b'draft', None, _(b'use the draft phase for committing')),
         (b'n', b'note', b'', _(b'store a note on the amend')),
     ]
     + cmdutil.walkopts
@@ -64,6 +65,7 @@
 
     See :hg:`help commit` for more details.
     """
+    cmdutil.check_at_most_one_arg(opts, 'draft', 'secret')
     cmdutil.check_note_size(opts)
 
     with repo.wlock(), repo.lock():
--- a/hgext/automv.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/automv.py	Thu Mar 02 22:45:44 2023 +0100
@@ -59,21 +59,29 @@
     opts = pycompat.byteskwargs(opts)
     renames = None
     disabled = opts.pop(b'no_automv', False)
-    if not disabled:
-        threshold = ui.configint(b'automv', b'similarity')
-        if not 0 <= threshold <= 100:
-            raise error.Abort(_(b'automv.similarity must be between 0 and 100'))
-        if threshold > 0:
-            match = scmutil.match(repo[None], pats, opts)
-            added, removed = _interestingfiles(repo, match)
-            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-            renames = _findrenames(
-                repo, uipathfn, added, removed, threshold / 100.0
-            )
+    with repo.wlock():
+        if not disabled:
+            threshold = ui.configint(b'automv', b'similarity')
+            if not 0 <= threshold <= 100:
+                raise error.Abort(
+                    _(b'automv.similarity must be between 0 and 100')
+                )
+            if threshold > 0:
+                match = scmutil.match(repo[None], pats, opts)
+                added, removed = _interestingfiles(repo, match)
+                uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+                renames = _findrenames(
+                    repo, uipathfn, added, removed, threshold / 100.0
+                )
 
-    with repo.wlock():
         if renames is not None:
-            scmutil._markchanges(repo, (), (), renames)
+            with repo.dirstate.changing_files(repo):
+                # XXX this should be wider and integrated with the commit
+                # transaction. At the same time as we do the `addremove` logic
+                # for commit.  However we can't really do better with the
+                # current extension structure, and this is not worse than what
+                # happened before.
+                scmutil._markchanges(repo, (), (), renames)
         return orig(ui, repo, *pats, **pycompat.strkwargs(opts))
 
 
--- a/hgext/blackbox.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/blackbox.py	Thu Mar 02 22:45:44 2023 +0100
@@ -217,6 +217,8 @@
         return
 
     limit = opts.get('limit')
+    assert limit is not None  # help pytype
+
     fp = repo.vfs(b'blackbox.log', b'r')
     lines = fp.read().split(b'\n')
 
--- a/hgext/convert/bzr.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/convert/bzr.py	Thu Mar 02 22:45:44 2023 +0100
@@ -31,11 +31,14 @@
 
 try:
     # bazaar imports
+    # pytype: disable=import-error
     import breezy.bzr.bzrdir
     import breezy.errors
     import breezy.revision
     import breezy.revisionspec
 
+    # pytype: enable=import-error
+
     bzrdir = breezy.bzr.bzrdir
     errors = breezy.errors
     revision = breezy.revision
--- a/hgext/convert/convcmd.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/convert/convcmd.py	Thu Mar 02 22:45:44 2023 +0100
@@ -6,6 +6,7 @@
 # GNU General Public License version 2 or any later version.
 
 import collections
+import heapq
 import os
 import shutil
 
@@ -198,6 +199,59 @@
         self.progress.complete()
 
 
+# Sorters are used by the `toposort` function to maintain a set of revisions
+# that can be converted immediately, and to pick the next one to convert
+class branchsorter:
+    """If the previously converted revision has a child in the
+    eligible revisions list, pick it. Return the list head
+    otherwise. Branch sort attempts to minimize branch
+    switching, which is harmful for Mercurial backend
+    compression.
+    """
+
+    def __init__(self, parents):
+        self.nodes = []
+        self.parents = parents
+        self.prev = None
+
+    def picknext(self):
+        next = self.nodes[0]
+        for n in self.nodes:
+            if self.prev in self.parents[n]:
+                next = n
+                break
+        self.prev = next
+        self.nodes.remove(next)
+        return next
+
+    def insert(self, node):
+        self.nodes.insert(0, node)
+
+    def __len__(self):
+        return self.nodes.__len__()
+
+
+class keysorter:
+    """Key-based sort, ties broken by insertion order"""
+
+    def __init__(self, keyfn):
+        self.heap = []
+        self.keyfn = keyfn
+        self.counter = 0
+
+    def picknext(self):
+        return heapq.heappop(self.heap)[2]
+
+    def insert(self, node):
+        counter = self.counter
+        self.counter = counter + 1
+        key = self.keyfn(node)
+        heapq.heappush(self.heap, (key, counter, node))
+
+    def __len__(self):
+        return self.heap.__len__()
+
+
 class converter:
     def __init__(self, ui, source, dest, revmapfile, opts):
 
@@ -364,37 +418,10 @@
 
             return children, roots
 
-        # Sort functions are supposed to take a list of revisions which
-        # can be converted immediately and pick one
-
-        def makebranchsorter():
-            """If the previously converted revision has a child in the
-            eligible revisions list, pick it. Return the list head
-            otherwise. Branch sort attempts to minimize branch
-            switching, which is harmful for Mercurial backend
-            compression.
-            """
-            prev = [None]
-
-            def picknext(nodes):
-                next = nodes[0]
-                for n in nodes:
-                    if prev[0] in parents[n]:
-                        next = n
-                        break
-                prev[0] = next
-                return next
-
-            return picknext
-
         def makesourcesorter():
             """Source specific sort."""
             keyfn = lambda n: self.commitcache[n].sortkey
-
-            def picknext(nodes):
-                return sorted(nodes, key=keyfn)[0]
-
-            return picknext
+            return keysorter(keyfn)
 
         def makeclosesorter():
             """Close order sort."""
@@ -402,44 +429,36 @@
                 b'close' not in self.commitcache[n].extra,
                 self.commitcache[n].sortkey,
             )
-
-            def picknext(nodes):
-                return sorted(nodes, key=keyfn)[0]
-
-            return picknext
+            return keysorter(keyfn)
 
         def makedatesorter():
             """Sort revisions by date."""
-            dates = {}
 
             def getdate(n):
-                if n not in dates:
-                    dates[n] = dateutil.parsedate(self.commitcache[n].date)
-                return dates[n]
+                return dateutil.parsedate(self.commitcache[n].date)
 
-            def picknext(nodes):
-                return min([(getdate(n), n) for n in nodes])[1]
-
-            return picknext
+            return keysorter(getdate)
 
         if sortmode == b'branchsort':
-            picknext = makebranchsorter()
+            sorter = branchsorter(parents)
         elif sortmode == b'datesort':
-            picknext = makedatesorter()
+            sorter = makedatesorter()
         elif sortmode == b'sourcesort':
-            picknext = makesourcesorter()
+            sorter = makesourcesorter()
         elif sortmode == b'closesort':
-            picknext = makeclosesorter()
+            sorter = makeclosesorter()
         else:
             raise error.Abort(_(b'unknown sort mode: %s') % sortmode)
 
-        children, actives = mapchildren(parents)
+        children, roots = mapchildren(parents)
+
+        for node in roots:
+            sorter.insert(node)
 
         s = []
         pendings = {}
-        while actives:
-            n = picknext(actives)
-            actives.remove(n)
+        while sorter:
+            n = sorter.picknext()
             s.append(n)
 
             # Update dependents list
@@ -455,7 +474,7 @@
                     )
                 if not pendings[c]:
                     # Parents are converted, node is eligible
-                    actives.insert(0, c)
+                    sorter.insert(c)
                     pendings[c] = None
 
         if len(s) != len(parents):
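
Note: the refactor replaces the closure-based picknext functions with
stateful sorter objects. keysorter pushes (key, insertion counter, node)
tuples onto a heap, so equal keys come back in insertion order. A standalone
copy of the class with a small check of that tie-breaking:

    import heapq

    class keysorter:
        """Key-based sort, ties broken by insertion order (as above)."""

        def __init__(self, keyfn):
            self.heap = []
            self.keyfn = keyfn
            self.counter = 0

        def picknext(self):
            return heapq.heappop(self.heap)[2]

        def insert(self, node):
            counter = self.counter
            self.counter = counter + 1
            heapq.heappush(self.heap, (self.keyfn(node), counter, node))

        def __len__(self):
            return len(self.heap)

    s = keysorter(keyfn=len)
    for node in ("bb", "a", "cc"):
        s.insert(node)
    # "bb" and "cc" share key 2; "bb" was inserted first, so it wins the tie.
    assert [s.picknext() for _ in range(len(s))] == ["a", "bb", "cc"]
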
--- a/hgext/convert/hg.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/convert/hg.py	Thu Mar 02 22:45:44 2023 +0100
@@ -608,7 +608,10 @@
             files = copyfiles = ctx.manifest()
         if parents:
             if self._changescache[0] == rev:
-                ma, r = self._changescache[1]
+                # TODO: add type hints to avoid this warning, instead of
+                #  suppressing it:
+                #     No attribute '__iter__' on None [attribute-error]
+                ma, r = self._changescache[1]  # pytype: disable=attribute-error
             else:
                 ma, r = self._changedfiles(parents[0], ctx)
             if not full:
--- a/hgext/convert/monotone.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/convert/monotone.py	Thu Mar 02 22:45:44 2023 +0100
@@ -243,6 +243,7 @@
             m = self.cert_re.match(e)
             if m:
                 name, value = m.groups()
+                assert value is not None  # help pytype
                 value = value.replace(br'\"', b'"')
                 value = value.replace(br'\\', b'\\')
                 certs[name] = value
--- a/hgext/convert/subversion.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/convert/subversion.py	Thu Mar 02 22:45:44 2023 +0100
@@ -47,11 +47,14 @@
 # these bindings.
 
 try:
+    # pytype: disable=import-error
     import svn
     import svn.client
     import svn.core
     import svn.ra
     import svn.delta
+
+    # pytype: enable=import-error
     from . import transport
     import warnings
 
@@ -722,7 +725,13 @@
     def getchanges(self, rev, full):
         # reuse cache from getchangedfiles
         if self._changescache[0] == rev and not full:
+            # TODO: add type hints to avoid this warning, instead of
+            #  suppressing it:
+            #     No attribute '__iter__' on None [attribute-error]
+
+            # pytype: disable=attribute-error
             (files, copies) = self._changescache[1]
+            # pytype: enable=attribute-error
         else:
             (files, copies) = self._getchanges(rev, full)
             # caller caches the result, so free it here to release memory
--- a/hgext/convert/transport.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/convert/transport.py	Thu Mar 02 22:45:44 2023 +0100
@@ -17,10 +17,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <http://www.gnu.org/licenses/>.
 
+# pytype: disable=import-error
 import svn.client
 import svn.core
 import svn.ra
 
+# pytype: enable=import-error
+
 Pool = svn.core.Pool
 SubversionException = svn.core.SubversionException
 
@@ -37,7 +40,7 @@
 
 def _create_auth_baton(pool):
     """Create a Subversion authentication baton."""
-    import svn.client
+    import svn.client  # pytype: disable=import-error
 
     # Give the client context baton a suite of authentication
     # providers.h
--- a/hgext/eol.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/eol.py	Thu Mar 02 22:45:44 2023 +0100
@@ -421,30 +421,31 @@
                 wlock = None
                 try:
                     wlock = self.wlock()
-                    for f in self.dirstate:
-                        if not self.dirstate.get_entry(f).maybe_clean:
-                            continue
-                        if oldeol is not None:
-                            if not oldeol.match(f) and not neweol.match(f):
+                    with self.dirstate.changing_files(self):
+                        for f in self.dirstate:
+                            if not self.dirstate.get_entry(f).maybe_clean:
                                 continue
-                            oldkey = None
-                            for pattern, key, m in oldeol.patterns:
-                                if m(f):
-                                    oldkey = key
-                                    break
-                            newkey = None
-                            for pattern, key, m in neweol.patterns:
-                                if m(f):
-                                    newkey = key
-                                    break
-                            if oldkey == newkey:
-                                continue
-                        # all normal files need to be looked at again since
-                        # the new .hgeol file specify a different filter
-                        self.dirstate.set_possibly_dirty(f)
-                    # Write the cache to update mtime and cache .hgeol
-                    with self.vfs(b"eol.cache", b"w") as f:
-                        f.write(hgeoldata)
+                            if oldeol is not None:
+                                if not oldeol.match(f) and not neweol.match(f):
+                                    continue
+                                oldkey = None
+                                for pattern, key, m in oldeol.patterns:
+                                    if m(f):
+                                        oldkey = key
+                                        break
+                                newkey = None
+                                for pattern, key, m in neweol.patterns:
+                                    if m(f):
+                                        newkey = key
+                                        break
+                                if oldkey == newkey:
+                                    continue
+                            # all normal files need to be looked at again since
+                            # the new .hgeol file specify a different filter
+                            self.dirstate.set_possibly_dirty(f)
+                        # Write the cache to update mtime and cache .hgeol
+                        with self.vfs(b"eol.cache", b"w") as f:
+                            f.write(hgeoldata)
                 except errormod.LockUnavailable:
                     # If we cannot lock the repository and clear the
                     # dirstate, then a commit might not see all files
--- a/hgext/fastannotate/protocol.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fastannotate/protocol.py	Thu Mar 02 22:45:44 2023 +0100
@@ -151,8 +151,11 @@
     ui = repo.ui
 
     remotedest = ui.config(b'fastannotate', b'remotepath', b'default')
-    r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest)
-    remotepath = r[0]
+    remotepath = urlutil.get_unique_pull_path_obj(
+        b'fastannotate',
+        ui,
+        remotedest,
+    )
     peer = hg.peer(ui, {}, remotepath)
 
     try:
--- a/hgext/fetch.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fetch.py	Thu Mar 02 22:45:44 2023 +0100
@@ -108,9 +108,9 @@
                 )
             )
 
-        path = urlutil.get_unique_pull_path(b'fetch', repo, ui, source)[0]
+        path = urlutil.get_unique_pull_path_obj(b'fetch', ui, source)
         other = hg.peer(repo, opts, path)
-        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path))
+        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
         revs = None
         if opts[b'rev']:
             try:
--- a/hgext/fix.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fix.py	Thu Mar 02 22:45:44 2023 +0100
@@ -779,7 +779,7 @@
     newp1 = replacements.get(oldp1, oldp1)
     if newp1 != oldp1:
         assert repo.dirstate.p2() == nullid
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             scmutil.movedirstate(repo, repo[newp1])
 
 
--- a/hgext/fsmonitor/pywatchman/__init__.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fsmonitor/pywatchman/__init__.py	Thu Mar 02 22:45:44 2023 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import inspect
 import math
 import os
@@ -94,7 +92,9 @@
 
     LPDWORD = ctypes.POINTER(wintypes.DWORD)
 
-    CreateFile = ctypes.windll.kernel32.CreateFileA
+    _kernel32 = ctypes.windll.kernel32  # pytype: disable=module-attr
+
+    CreateFile = _kernel32.CreateFileA
     CreateFile.argtypes = [
         wintypes.LPSTR,
         wintypes.DWORD,
@@ -106,11 +106,11 @@
     ]
     CreateFile.restype = wintypes.HANDLE
 
-    CloseHandle = ctypes.windll.kernel32.CloseHandle
+    CloseHandle = _kernel32.CloseHandle
     CloseHandle.argtypes = [wintypes.HANDLE]
     CloseHandle.restype = wintypes.BOOL
 
-    ReadFile = ctypes.windll.kernel32.ReadFile
+    ReadFile = _kernel32.ReadFile
     ReadFile.argtypes = [
         wintypes.HANDLE,
         wintypes.LPVOID,
@@ -120,7 +120,7 @@
     ]
     ReadFile.restype = wintypes.BOOL
 
-    WriteFile = ctypes.windll.kernel32.WriteFile
+    WriteFile = _kernel32.WriteFile
     WriteFile.argtypes = [
         wintypes.HANDLE,
         wintypes.LPVOID,
@@ -130,15 +130,15 @@
     ]
     WriteFile.restype = wintypes.BOOL
 
-    GetLastError = ctypes.windll.kernel32.GetLastError
+    GetLastError = _kernel32.GetLastError
     GetLastError.argtypes = []
     GetLastError.restype = wintypes.DWORD
 
-    SetLastError = ctypes.windll.kernel32.SetLastError
+    SetLastError = _kernel32.SetLastError
     SetLastError.argtypes = [wintypes.DWORD]
     SetLastError.restype = None
 
-    FormatMessage = ctypes.windll.kernel32.FormatMessageA
+    FormatMessage = _kernel32.FormatMessageA
     FormatMessage.argtypes = [
         wintypes.DWORD,
         wintypes.LPVOID,
@@ -150,9 +150,9 @@
     ]
     FormatMessage.restype = wintypes.DWORD
 
-    LocalFree = ctypes.windll.kernel32.LocalFree
+    LocalFree = _kernel32.LocalFree
 
-    GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
+    GetOverlappedResult = _kernel32.GetOverlappedResult
     GetOverlappedResult.argtypes = [
         wintypes.HANDLE,
         ctypes.POINTER(OVERLAPPED),
@@ -161,9 +161,7 @@
     ]
     GetOverlappedResult.restype = wintypes.BOOL
 
-    GetOverlappedResultEx = getattr(
-        ctypes.windll.kernel32, "GetOverlappedResultEx", None
-    )
+    GetOverlappedResultEx = getattr(_kernel32, "GetOverlappedResultEx", None)
     if GetOverlappedResultEx is not None:
         GetOverlappedResultEx.argtypes = [
             wintypes.HANDLE,
@@ -174,7 +172,7 @@
         ]
         GetOverlappedResultEx.restype = wintypes.BOOL
 
-    WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx
+    WaitForSingleObjectEx = _kernel32.WaitForSingleObjectEx
     WaitForSingleObjectEx.argtypes = [
         wintypes.HANDLE,
         wintypes.DWORD,
@@ -182,7 +180,7 @@
     ]
     WaitForSingleObjectEx.restype = wintypes.DWORD
 
-    CreateEvent = ctypes.windll.kernel32.CreateEventA
+    CreateEvent = _kernel32.CreateEventA
     CreateEvent.argtypes = [
         LPDWORD,
         wintypes.BOOL,
@@ -192,7 +190,7 @@
     CreateEvent.restype = wintypes.HANDLE
 
     # Windows Vista is the minimum supported client for CancelIoEx.
-    CancelIoEx = ctypes.windll.kernel32.CancelIoEx
+    CancelIoEx = _kernel32.CancelIoEx
     CancelIoEx.argtypes = [wintypes.HANDLE, ctypes.POINTER(OVERLAPPED)]
     CancelIoEx.restype = wintypes.BOOL
 
@@ -691,9 +689,9 @@
         if self.closed:
             self.close()
             self.closed = False
-        self._connect()
-        res = self.proc.stdin.write(data)
-        self.proc.stdin.close()
+        proc = self._connect()
+        res = proc.stdin.write(data)
+        proc.stdin.close()
         self.closed = True
         return res
 
@@ -988,8 +986,12 @@
                 # if invoked via an application with graphical user interface,
                 # this call will cause a brief command window pop-up.
                 # Using the flag STARTF_USESHOWWINDOW to avoid this behavior.
+
+                # pytype: disable=module-attr
                 startupinfo = subprocess.STARTUPINFO()
                 startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+                # pytype: enable=module-attr
+
                 args["startupinfo"] = startupinfo
 
             p = subprocess.Popen(cmd, **args)
@@ -1026,7 +1028,11 @@
         if self.transport == CLIProcessTransport:
             kwargs["binpath"] = self.binpath
 
+        # Only CLIProcessTransport has the binpath kwarg
+        # pytype: disable=wrong-keyword-args
         self.tport = self.transport(self.sockpath, self.timeout, **kwargs)
+        # pytype: enable=wrong-keyword-args
+
         self.sendConn = self.sendCodec(self.tport)
         self.recvConn = self.recvCodec(self.tport)
         self.pid = os.getpid()
--- a/hgext/fsmonitor/pywatchman/capabilities.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fsmonitor/pywatchman/capabilities.py	Thu Mar 02 22:45:44 2023 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 
 def parse_version(vstr):
     res = 0
--- a/hgext/fsmonitor/pywatchman/compat.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fsmonitor/pywatchman/compat.py	Thu Mar 02 22:45:44 2023 +0100
@@ -26,45 +26,28 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import sys
 
 
 """Compatibility module across Python 2 and 3."""
 
 
-PYTHON2 = sys.version_info < (3, 0)
 PYTHON3 = sys.version_info >= (3, 0)
 
 # This is adapted from https://bitbucket.org/gutworth/six, and used under the
 # MIT license. See LICENSE for a full copyright notice.
-if PYTHON3:
-
-    def reraise(tp, value, tb=None):
-        try:
-            if value is None:
-                value = tp()
-            if value.__traceback__ is not tb:
-                raise value.with_traceback(tb)
-            raise value
-        finally:
-            value = None
-            tb = None
 
 
-else:
-    exec(
-        """
 def reraise(tp, value, tb=None):
     try:
-        raise tp, value, tb
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
     finally:
+        value = None
         tb = None
-""".strip()
-    )
+
 
-if PYTHON3:
-    UNICODE = str
-else:
-    UNICODE = unicode  # noqa: F821 We handled versioning above
+UNICODE = str
--- a/hgext/fsmonitor/pywatchman/encoding.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fsmonitor/pywatchman/encoding.py	Thu Mar 02 22:45:44 2023 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import sys
 
 from . import compat
--- a/hgext/fsmonitor/pywatchman/load.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fsmonitor/pywatchman/load.py	Thu Mar 02 22:45:44 2023 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import ctypes
 
 
--- a/hgext/fsmonitor/pywatchman/pybser.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/fsmonitor/pywatchman/pybser.py	Thu Mar 02 22:45:44 2023 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import binascii
 import collections
 import ctypes
@@ -53,17 +51,15 @@
 BSER_SKIP = b"\x0c"
 BSER_UTF8STRING = b"\x0d"
 
-if compat.PYTHON3:
-    STRING_TYPES = (str, bytes)
-    unicode = str
+STRING_TYPES = (str, bytes)
+unicode = str
+
 
-    def tobytes(i):
-        return str(i).encode("ascii")
+def tobytes(i):
+    return str(i).encode("ascii")
 
-    long = int
-else:
-    STRING_TYPES = (unicode, str)
-    tobytes = bytes
+
+long = int
 
 # Leave room for the serialization header, which includes
 # our overall length.  To make things simpler, we'll use an
@@ -89,7 +85,7 @@
 def _buf_pos(buf, pos):
     ret = buf[pos]
     # Normalize the return type to bytes
-    if compat.PYTHON3 and not isinstance(ret, bytes):
+    if not isinstance(ret, bytes):
         ret = bytes((ret,))
     return ret
 
@@ -252,10 +248,7 @@
             else:
                 raise RuntimeError("Cannot represent this mapping value")
             self.wpos += needed
-            if compat.PYTHON3:
-                iteritems = val.items()
-            else:
-                iteritems = val.iteritems()  # noqa: B301 Checked version above
+            iteritems = val.items()
             for k, v in iteritems:
                 self.append_string(k)
                 self.append_recursive(v)
--- a/hgext/git/dirstate.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/git/dirstate.py	Thu Mar 02 22:45:44 2023 +0100
@@ -260,7 +260,12 @@
     # # TODO what the heck is this
     _filecache = set()
 
-    def pendingparentchange(self):
+    def is_changing_parents(self):
+        # TODO: we need to implement the context manager bits and
+        # correctly stage/revert index edits.
+        return False
+
+    def is_changing_any(self):
         # TODO: we need to implement the context manager bits and
         # correctly stage/revert index edits.
         return False
@@ -322,14 +327,6 @@
             r[path] = s
         return r
 
-    def savebackup(self, tr, backupname):
-        # TODO: figure out a strategy for saving index backups.
-        pass
-
-    def restorebackup(self, tr, backupname):
-        # TODO: figure out a strategy for saving index backups.
-        pass
-
     def set_tracked(self, f, reset_copy=False):
         # TODO: support copies and reset_copy=True
         uf = pycompat.fsdecode(f)
@@ -384,7 +381,7 @@
         pass
 
     @contextlib.contextmanager
-    def parentchange(self):
+    def changing_parents(self, repo):
         # TODO: track this maybe?
         yield
 
@@ -392,11 +389,7 @@
         # TODO: should this be added to the dirstate interface?
         self._plchangecallbacks[category] = callback
 
-    def clearbackup(self, tr, backupname):
-        # TODO
-        pass
-
-    def setbranch(self, branch):
+    def setbranch(self, branch, transaction=None):
         raise error.Abort(
             b'git repos do not support branches. try using bookmarks'
         )
--- a/hgext/git/gitutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/git/gitutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -9,7 +9,7 @@
     global pygit2_module
     if pygit2_module is None:
         try:
-            import pygit2 as pygit2_module
+            import pygit2 as pygit2_module  # pytype: disable=import-error
 
             pygit2_module.InvalidSpecError
         except (ImportError, AttributeError):
--- a/hgext/gpg.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/gpg.py	Thu Mar 02 22:45:44 2023 +0100
@@ -352,7 +352,8 @@
     sigsfile.close()
 
     if b'.hgsigs' not in repo.dirstate:
-        repo[None].add([b".hgsigs"])
+        with repo.dirstate.changing_files(repo):
+            repo[None].add([b".hgsigs"])
 
     if opts[b"no_commit"]:
         return
--- a/hgext/histedit.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/histedit.py	Thu Mar 02 22:45:44 2023 +0100
@@ -581,7 +581,7 @@
         with repo.ui.silent():
             hg.update(repo, self.state.parentctxnode, quietempty=True)
         stats = applychanges(repo.ui, repo, rulectx, {})
-        repo.dirstate.setbranch(rulectx.branch())
+        repo.dirstate.setbranch(rulectx.branch(), repo.currenttransaction())
         if stats.unresolvedcount:
             raise error.InterventionRequired(
                 _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),
@@ -1051,12 +1051,11 @@
     if opts is None:
         opts = {}
     path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
-    dest = path.pushloc or path.loc
-
-    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))
+
+    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
 
     revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
-    other = hg.peer(repo, opts, dest)
+    other = hg.peer(repo, opts, path)
 
     if revs:
         revs = [repo.lookup(rev) for rev in revs]
--- a/hgext/hooklib/changeset_obsoleted.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/hooklib/changeset_obsoleted.py	Thu Mar 02 22:45:44 2023 +0100
@@ -32,7 +32,10 @@
     pycompat,
     registrar,
 )
-from mercurial.utils import dateutil
+from mercurial.utils import (
+    dateutil,
+    stringutil,
+)
 from .. import notify
 
 configtable = {}
@@ -98,7 +101,7 @@
     try:
         msg = mail.parsebytes(data)
     except emailerrors.MessageParseError as inst:
-        raise error.Abort(inst)
+        raise error.Abort(stringutil.forcebytestr(inst))
 
     msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
     msg['Message-Id'] = notify.messageid(
--- a/hgext/hooklib/changeset_published.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/hooklib/changeset_published.py	Thu Mar 02 22:45:44 2023 +0100
@@ -31,7 +31,10 @@
     pycompat,
     registrar,
 )
-from mercurial.utils import dateutil
+from mercurial.utils import (
+    dateutil,
+    stringutil,
+)
 from .. import notify
 
 configtable = {}
@@ -97,7 +100,7 @@
     try:
         msg = mail.parsebytes(data)
     except emailerrors.MessageParseError as inst:
-        raise error.Abort(inst)
+        raise error.Abort(stringutil.forcebytestr(inst))
 
     msg['In-reply-to'] = notify.messageid(ctx, domain, messageidseed)
     msg['Message-Id'] = notify.messageid(
--- a/hgext/infinitepush/__init__.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/infinitepush/__init__.py	Thu Mar 02 22:45:44 2023 +0100
@@ -683,12 +683,10 @@
 def _pull(orig, ui, repo, source=b"default", **opts):
     opts = pycompat.byteskwargs(opts)
     # Copy paste from `pull` command
-    source, branches = urlutil.get_unique_pull_path(
+    path = urlutil.get_unique_pull_path_obj(
         b"infinite-push's pull",
-        repo,
         ui,
         source,
-        default_branches=opts.get(b'branch'),
     )
 
     scratchbookmarks = {}
@@ -709,7 +707,7 @@
                 bookmarks.append(bookmark)
 
         if scratchbookmarks:
-            other = hg.peer(repo, opts, source)
+            other = hg.peer(repo, opts, path)
             try:
                 fetchedbookmarks = other.listkeyspatterns(
                     b'bookmarks', patterns=scratchbookmarks
@@ -734,14 +732,14 @@
     try:
         # Remote scratch bookmarks will be deleted because remotenames doesn't
         # know about them. Let's save it before pull and restore after
-        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
-        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
+        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, path.loc)
+        result = orig(ui, repo, path.loc, **pycompat.strkwargs(opts))
         # TODO(stash): race condition is possible
         # if scratch bookmarks was updated right after orig.
         # But that's unlikely and shouldn't be harmful.
         if common.isremotebooksenabled(ui):
             remotescratchbookmarks.update(scratchbookmarks)
-            _saveremotebookmarks(repo, remotescratchbookmarks, source)
+            _saveremotebookmarks(repo, remotescratchbookmarks, path.loc)
         else:
             _savelocalbookmarks(repo, scratchbookmarks)
         return result
@@ -849,14 +847,14 @@
             raise error.Abort(msg)
 
         path = paths[0]
-        destpath = path.pushloc or path.loc
+        destpath = path.loc
         # Remote scratch bookmarks will be deleted because remotenames doesn't
         # know about them. Let's save it before push and restore after
         remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
         result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
         if common.isremotebooksenabled(ui):
             if bookmark and scratchpush:
-                other = hg.peer(repo, opts, destpath)
+                other = hg.peer(repo, opts, path)
                 try:
                     fetchedbookmarks = other.listkeyspatterns(
                         b'bookmarks', patterns=[bookmark]
--- a/hgext/journal.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/journal.py	Thu Mar 02 22:45:44 2023 +0100
@@ -567,8 +567,12 @@
         )
         fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes))
         fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user)
+
+        # ``name`` is bytes, or None only if 'all' was an option.
         fm.condwrite(
+            # pytype: disable=attribute-error
             opts.get(b'all') or name.startswith(b're:'),
+            # pytype: enable=attribute-error
             b'name',
             b'  %-8s',
             entry.name,
--- a/hgext/keyword.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/keyword.py	Thu Mar 02 22:45:44 2023 +0100
@@ -437,7 +437,7 @@
     if len(wctx.parents()) > 1:
         raise error.Abort(_(b'outstanding uncommitted merge'))
     kwt = getattr(repo, '_keywordkwt', None)
-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.changing_files(repo):
         status = _status(ui, repo, wctx, kwt, *pats, **opts)
         if status.modified or status.added or status.removed or status.deleted:
             raise error.Abort(_(b'outstanding uncommitted changes'))
@@ -530,17 +530,18 @@
     demoitems(b'keywordmaps', kwmaps.items())
     keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
     repo.wvfs.write(fn, keywords)
-    repo[None].add([fn])
-    ui.note(_(b'\nkeywords written to %s:\n') % fn)
-    ui.note(keywords)
     with repo.wlock():
-        repo.dirstate.setbranch(b'demobranch')
-    for name, cmd in ui.configitems(b'hooks'):
-        if name.split(b'.', 1)[0].find(b'commit') > -1:
-            repo.ui.setconfig(b'hooks', name, b'', b'keyword')
-    msg = _(b'hg keyword configuration and expansion example')
-    ui.note((b"hg ci -m '%s'\n" % msg))
-    repo.commit(text=msg)
+        with repo.dirstate.changing_files(repo):
+            repo[None].add([fn])
+        ui.note(_(b'\nkeywords written to %s:\n') % fn)
+        ui.note(keywords)
+        repo.dirstate.setbranch(b'demobranch', repo.currenttransaction())
+        for name, cmd in ui.configitems(b'hooks'):
+            if name.split(b'.', 1)[0].find(b'commit') > -1:
+                repo.ui.setconfig(b'hooks', name, b'', b'keyword')
+        msg = _(b'hg keyword configuration and expansion example')
+        ui.note((b"hg ci -m '%s'\n" % msg))
+        repo.commit(text=msg)
     ui.status(_(b'\n\tkeywords expanded\n'))
     ui.write(repo.wread(fn))
     repo.wvfs.rmtree(repo.root)
@@ -696,7 +697,7 @@
     kwt = getattr(repo, '_keywordkwt', None)
     if kwt is None:
         return orig(ui, repo, old, extra, pats, opts)
-    with repo.wlock(), repo.dirstate.parentchange():
+    with repo.wlock(), repo.dirstate.changing_parents(repo):
         kwt.postcommit = True
         newid = orig(ui, repo, old, extra, pats, opts)
         if newid != old.node():
@@ -762,7 +763,7 @@
         if ctx != recctx:
             modified, added = _preselect(wstatus, recctx.files())
             kwt.restrict = False
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 kwt.overwrite(recctx, modified, False, True)
                 kwt.overwrite(recctx, added, False, True, True)
             kwt.restrict = True
--- a/hgext/largefiles/__init__.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/largefiles/__init__.py	Thu Mar 02 22:45:44 2023 +0100
@@ -107,6 +107,7 @@
 
 from mercurial import (
     cmdutil,
+    configitems,
     extensions,
     exthelper,
     hg,
@@ -135,7 +136,7 @@
 eh.configitem(
     b'largefiles',
     b'minsize',
-    default=eh.configitem.dynamicdefault,
+    default=configitems.dynamicdefault,
 )
 eh.configitem(
     b'largefiles',
--- a/hgext/largefiles/lfcommands.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/largefiles/lfcommands.py	Thu Mar 02 22:45:44 2023 +0100
@@ -219,7 +219,9 @@
         success = True
     finally:
         if tolfile:
-            rdst.dirstate.clear()
+            # XXX is this the right context semantically?
+            with rdst.dirstate.changing_parents(rdst):
+                rdst.dirstate.clear()
             release(dstlock, dstwlock)
         if not success:
             # we failed, remove the new directory
@@ -517,53 +519,52 @@
             filelist = set(filelist)
             lfiles = [f for f in lfiles if f in filelist]
 
-        with lfdirstate.parentchange():
-            update = {}
-            dropped = set()
-            updated, removed = 0, 0
-            wvfs = repo.wvfs
-            wctx = repo[None]
-            for lfile in lfiles:
-                lfileorig = os.path.relpath(
-                    scmutil.backuppath(ui, repo, lfile), start=repo.root
-                )
-                standin = lfutil.standin(lfile)
-                standinorig = os.path.relpath(
-                    scmutil.backuppath(ui, repo, standin), start=repo.root
-                )
-                if wvfs.exists(standin):
-                    if wvfs.exists(standinorig) and wvfs.exists(lfile):
-                        shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
-                        wvfs.unlinkpath(standinorig)
-                    expecthash = lfutil.readasstandin(wctx[standin])
-                    if expecthash != b'':
-                        if lfile not in wctx:  # not switched to normal file
-                            if repo.dirstate.get_entry(standin).any_tracked:
-                                wvfs.unlinkpath(lfile, ignoremissing=True)
-                            else:
-                                dropped.add(lfile)
+        update = {}
+        dropped = set()
+        updated, removed = 0, 0
+        wvfs = repo.wvfs
+        wctx = repo[None]
+        for lfile in lfiles:
+            lfileorig = os.path.relpath(
+                scmutil.backuppath(ui, repo, lfile), start=repo.root
+            )
+            standin = lfutil.standin(lfile)
+            standinorig = os.path.relpath(
+                scmutil.backuppath(ui, repo, standin), start=repo.root
+            )
+            if wvfs.exists(standin):
+                if wvfs.exists(standinorig) and wvfs.exists(lfile):
+                    shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
+                    wvfs.unlinkpath(standinorig)
+                expecthash = lfutil.readasstandin(wctx[standin])
+                if expecthash != b'':
+                    if lfile not in wctx:  # not switched to normal file
+                        if repo.dirstate.get_entry(standin).any_tracked:
+                            wvfs.unlinkpath(lfile, ignoremissing=True)
+                        else:
+                            dropped.add(lfile)
 
-                        # use normallookup() to allocate an entry in largefiles
-                        # dirstate to prevent lfilesrepo.status() from reporting
-                        # missing files as removed.
-                        lfdirstate.update_file(
-                            lfile,
-                            p1_tracked=True,
-                            wc_tracked=True,
-                            possibly_dirty=True,
-                        )
-                        update[lfile] = expecthash
-                else:
-                    # Remove lfiles for which the standin is deleted, unless the
-                    # lfile is added to the repository again. This happens when a
-                    # largefile is converted back to a normal file: the standin
-                    # disappears, but a new (normal) file appears as the lfile.
-                    if (
-                        wvfs.exists(lfile)
-                        and repo.dirstate.normalize(lfile) not in wctx
-                    ):
-                        wvfs.unlinkpath(lfile)
-                        removed += 1
+                    # allocate an entry in largefiles dirstate to prevent
+                    # lfilesrepo.status() from reporting missing files as
+                    # removed.
+                    lfdirstate.hacky_extension_update_file(
+                        lfile,
+                        p1_tracked=True,
+                        wc_tracked=True,
+                        possibly_dirty=True,
+                    )
+                    update[lfile] = expecthash
+            else:
+                # Remove lfiles for which the standin is deleted, unless the
+                # lfile is added to the repository again. This happens when a
+                # largefile is converted back to a normal file: the standin
+                # disappears, but a new (normal) file appears as the lfile.
+                if (
+                    wvfs.exists(lfile)
+                    and repo.dirstate.normalize(lfile) not in wctx
+                ):
+                    wvfs.unlinkpath(lfile)
+                    removed += 1
 
         # largefile processing might be slow and be interrupted - be prepared
         lfdirstate.write(repo.currenttransaction())
@@ -580,41 +581,42 @@
             statuswriter(_(b'getting changed largefiles\n'))
             cachelfiles(ui, repo, None, lfiles)
 
-        with lfdirstate.parentchange():
-            for lfile in lfiles:
-                update1 = 0
+        for lfile in lfiles:
+            update1 = 0
 
-                expecthash = update.get(lfile)
-                if expecthash:
-                    if not lfutil.copyfromcache(repo, expecthash, lfile):
-                        # failed ... but already removed and set to normallookup
-                        continue
-                    # Synchronize largefile dirstate to the last modified
-                    # time of the file
-                    lfdirstate.update_file(
-                        lfile, p1_tracked=True, wc_tracked=True
-                    )
+            expecthash = update.get(lfile)
+            if expecthash:
+                if not lfutil.copyfromcache(repo, expecthash, lfile):
+                    # failed ... but already removed and set to normallookup
+                    continue
+                # Synchronize largefile dirstate to the last modified
+                # time of the file
+                lfdirstate.hacky_extension_update_file(
+                    lfile,
+                    p1_tracked=True,
+                    wc_tracked=True,
+                )
+                update1 = 1
+
+            # copy the exec mode of largefile standin from the repository's
+            # dirstate to its state in the lfdirstate.
+            standin = lfutil.standin(lfile)
+            if wvfs.exists(standin):
+                # exec is decided by the user's permissions using mask 0o100
+                standinexec = wvfs.stat(standin).st_mode & 0o100
+                st = wvfs.stat(lfile)
+                mode = st.st_mode
+                if standinexec != mode & 0o100:
+                    # first remove all X bits, then shift all R bits to X
+                    mode &= ~0o111
+                    if standinexec:
+                        mode |= (mode >> 2) & 0o111 & ~util.umask
+                    wvfs.chmod(lfile, mode)
                     update1 = 1
 
-                # copy the exec mode of largefile standin from the repository's
-                # dirstate to its state in the lfdirstate.
-                standin = lfutil.standin(lfile)
-                if wvfs.exists(standin):
-                    # exec is decided by the users permissions using mask 0o100
-                    standinexec = wvfs.stat(standin).st_mode & 0o100
-                    st = wvfs.stat(lfile)
-                    mode = st.st_mode
-                    if standinexec != mode & 0o100:
-                        # first remove all X bits, then shift all R bits to X
-                        mode &= ~0o111
-                        if standinexec:
-                            mode |= (mode >> 2) & 0o111 & ~util.umask
-                        wvfs.chmod(lfile, mode)
-                        update1 = 1
+            updated += update1
 
-                updated += update1
-
-                lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
+            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
 
         lfdirstate.write(repo.currenttransaction())
         if lfiles:
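
For reference, the exec-bit propagation in the lfcommands.py hunk above can be
exercised in isolation. A minimal, runnable sketch (propagate_exec is an
illustrative name, not part of the change; 0o100 tests the owner's execute
bit):

    def propagate_exec(mode, standinexec, umask):
        # mirror of the fix-up above: drop all execute bits, then, if the
        # standin is executable, derive execute bits from the read bits
        # (shifted right by two), masked by the process umask
        mode &= ~0o111
        if standinexec:
            mode |= (mode >> 2) & 0o111 & ~umask
        return mode

    assert propagate_exec(0o100644, 0o100, 0o022) == 0o100755
    assert propagate_exec(0o100755, 0, 0o022) == 0o100644
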
--- a/hgext/largefiles/lfutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/largefiles/lfutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -159,6 +159,9 @@
 
 
 class largefilesdirstate(dirstate.dirstate):
+    _large_file_dirstate = True
+    _tr_key_suffix = b'-large-files'
+
     def __getitem__(self, key):
         return super(largefilesdirstate, self).__getitem__(unixpath(key))
 
@@ -204,7 +207,13 @@
     """
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
+
+    If a dirstate object already exists and is being used for a 'changing_*'
+    context, it will be returned.
     """
+    sub_dirstate = getattr(repo.dirstate, '_sub_dirstate', None)
+    if sub_dirstate is not None:
+        return sub_dirstate
     vfs = repo.vfs
     lfstoredir = longname
     opener = vfsmod.vfs(vfs.join(lfstoredir))
@@ -223,20 +232,29 @@
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
     if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
-        matcher = getstandinmatcher(repo)
-        standins = repo.dirstate.walk(
-            matcher, subrepos=[], unknown=False, ignored=False
-        )
+        try:
+            with repo.wlock(wait=False), lfdirstate.changing_files(repo):
+                matcher = getstandinmatcher(repo)
+                standins = repo.dirstate.walk(
+                    matcher, subrepos=[], unknown=False, ignored=False
+                )
+
+                if len(standins) > 0:
+                    vfs.makedirs(lfstoredir)
 
-        if len(standins) > 0:
-            vfs.makedirs(lfstoredir)
-
-        with lfdirstate.parentchange():
-            for standin in standins:
-                lfile = splitstandin(standin)
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
-                )
+                for standin in standins:
+                    lfile = splitstandin(standin)
+                    lfdirstate.hacky_extension_update_file(
+                        lfile,
+                        p1_tracked=True,
+                        wc_tracked=True,
+                        possibly_dirty=True,
+                    )
+        except error.LockError:
+            # Assume that whatever was holding the lock was important.
+            # If we were doing something important, we would already have
+            # either the lock or a largefile dirstate.
+            pass
     return lfdirstate
 
 
@@ -565,10 +583,14 @@
 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
     lfstandin = standin(lfile)
     if lfstandin not in repo.dirstate:
-        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
+        lfdirstate.hacky_extension_update_file(
+            lfile,
+            p1_tracked=False,
+            wc_tracked=False,
+        )
     else:
         entry = repo.dirstate.get_entry(lfstandin)
-        lfdirstate.update_file(
+        lfdirstate.hacky_extension_update_file(
             lfile,
             wc_tracked=entry.tracked,
             p1_tracked=entry.p1_tracked,
@@ -580,8 +602,7 @@
 def markcommitted(orig, ctx, node):
     repo = ctx.repo()
 
-    lfdirstate = openlfdirstate(repo.ui, repo)
-    with lfdirstate.parentchange():
+    with repo.dirstate.changing_parents(repo):
         orig(node)
 
         # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
@@ -593,11 +614,11 @@
         # - have to be marked as "n" after commit, but
         # - aren't listed in "repo[node].files()"
 
+        lfdirstate = openlfdirstate(repo.ui, repo)
         for f in ctx.files():
             lfile = splitstandin(f)
             if lfile is not None:
                 synclfdirstate(repo, lfdirstate, lfile, False)
-    lfdirstate.write(repo.currenttransaction())
 
     # As part of committing, copy all of the largefiles into the cache.
     #
@@ -668,11 +689,16 @@
         # It can cost a lot of time (several seconds)
         # otherwise to update all standins if the largefiles are
         # large.
-        lfdirstate = openlfdirstate(ui, repo)
         dirtymatch = matchmod.always()
-        unsure, s, mtime_boundary = lfdirstate.status(
-            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
-        )
+        with repo.dirstate.running_status(repo):
+            lfdirstate = openlfdirstate(ui, repo)
+            unsure, s, mtime_boundary = lfdirstate.status(
+                dirtymatch,
+                subrepos=[],
+                ignored=False,
+                clean=False,
+                unknown=False,
+            )
         modifiedfiles = unsure + s.modified + s.added + s.removed
         lfiles = listlfiles(repo)
         # this only loops through largefiles that exist (not
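
The non-blocking lock acquisition added to openlfdirstate() can be mimicked
with a plain threading.Lock. A toy sketch of the bail-out behaviour, under the
same assumption as the comment above (whoever holds the lock either performs
the initialization itself or does not need it):

    import threading

    _lock = threading.Lock()

    def initialize_if_unlocked():
        # take the lock without waiting; if somebody else holds it,
        # assume they are doing the important work and skip out
        if not _lock.acquire(blocking=False):
            return False
        try:
            ...  # build the largefiles dirstate entries here
            return True
        finally:
            _lock.release()

    assert initialize_if_unlocked() is True
    with _lock:
        assert initialize_if_unlocked() is False
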
--- a/hgext/largefiles/overrides.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/largefiles/overrides.py	Thu Mar 02 22:45:44 2023 +0100
@@ -8,6 +8,7 @@
 
 '''Overridden Mercurial commands and functions for the largefiles extension'''
 
+import contextlib
 import copy
 import os
 
@@ -21,6 +22,7 @@
     archival,
     cmdutil,
     copies as copiesmod,
+    dirstate,
     error,
     exchange,
     extensions,
@@ -311,6 +313,48 @@
     )
 
 
+@eh.wrapfunction(dirstate.dirstate, b'_changing')
+@contextlib.contextmanager
+def _changing(orig, self, repo, change_type):
+    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
+    try:
+        lfd = getattr(self, '_large_file_dirstate', False)
+        if sub_dirstate is None and not lfd:
+            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
+            self._sub_dirstate = sub_dirstate
+        if not lfd:
+            assert self._sub_dirstate is not None
+        with orig(self, repo, change_type):
+            if sub_dirstate is None:
+                yield
+            else:
+                with sub_dirstate._changing(repo, change_type):
+                    yield
+    finally:
+        self._sub_dirstate = pre
+
+
+@eh.wrapfunction(dirstate.dirstate, b'running_status')
+@contextlib.contextmanager
+def running_status(orig, self, repo):
+    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
+    try:
+        lfd = getattr(self, '_large_file_dirstate', False)
+        if sub_dirstate is None and not lfd:
+            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
+            self._sub_dirstate = sub_dirstate
+        if not lfd:
+            assert self._sub_dirstate is not None
+        with orig(self, repo):
+            if sub_dirstate is None:
+                yield
+            else:
+                with sub_dirstate.running_status(repo):
+                    yield
+    finally:
+        self._sub_dirstate = pre
+
+
 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
 def overridestatusfn(orig, repo, rev2, **opts):
     with lfstatus(repo._repo):
@@ -511,10 +555,12 @@
 # largefiles. This makes the merge proceed and we can then handle this
 # case further in the overridden calculateupdates function below.
 @eh.wrapfunction(merge, b'_checkunknownfile')
-def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
-    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
+def overridecheckunknownfile(
+    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
+):
+    if lfutil.standin(dirstate.normalize(f)) in wctx:
         return False
-    return origfn(repo, wctx, mctx, f, f2)
+    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
 
 
 # The manifest merge handles conflicts on the manifest level. We want
@@ -658,18 +704,12 @@
 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
     if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        with lfdirstate.parentchange():
-            for lfile, args, msg in actions[
-                MERGE_ACTION_LARGEFILE_MARK_REMOVED
-            ]:
-                # this should be executed before 'orig', to execute 'remove'
-                # before all other actions
-                repo.dirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=False
-                )
-                # make sure lfile doesn't get synclfdirstate'd as normal
-                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
-        lfdirstate.write(repo.currenttransaction())
+        for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
+            # this should be executed before 'orig', to execute 'remove'
+            # before all other actions
+            repo.dirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
+            # make sure lfile doesn't get synclfdirstate'd as normal
+            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
 
     return orig(repo, actions, branchmerge, getfiledata)
 
@@ -901,7 +941,7 @@
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
     # prevent others from changing them in their incorrect state.
-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.running_status(repo):
         lfdirstate = lfutil.openlfdirstate(ui, repo)
         s = lfutil.lfdirstatestatus(lfdirstate, repo)
         lfdirstate.write(repo.currenttransaction())
@@ -1436,7 +1476,7 @@
 
             def addfunc(fn, lfhash):
                 if fn not in toupload:
-                    toupload[fn] = []
+                    toupload[fn] = []  # pytype: disable=unsupported-operands
                 toupload[fn].append(lfhash)
                 lfhashes.add(lfhash)
 
@@ -1520,20 +1560,34 @@
 
 
 @eh.wrapfunction(scmutil, b'addremove')
-def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
+def scmutiladdremove(
+    orig,
+    repo,
+    matcher,
+    prefix,
+    uipathfn,
+    opts=None,
+    open_tr=None,
+):
     if opts is None:
         opts = {}
     if not lfutil.islfilesrepo(repo):
-        return orig(repo, matcher, prefix, uipathfn, opts)
+        return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
+
+    # open the transaction and changing_files context
+    if open_tr is not None:
+        open_tr()
+
     # Get the list of missing largefiles so we can remove them
-    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-    unsure, s, mtime_boundary = lfdirstate.status(
-        matchmod.always(),
-        subrepos=[],
-        ignored=False,
-        clean=False,
-        unknown=False,
-    )
+    with repo.dirstate.running_status(repo):
+        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
+        unsure, s, mtime_boundary = lfdirstate.status(
+            matchmod.always(),
+            subrepos=[],
+            ignored=False,
+            clean=False,
+            unknown=False,
+        )
 
     # Call into the normal remove code, but we want the removal of the standin
     # to be handled by the original addremove.  Monkey patching here makes sure
@@ -1567,7 +1621,8 @@
     # function to take care of the rest.  Make sure it doesn't do anything with
     # largefiles by passing a matcher that will ignore them.
     matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
-    return orig(repo, matcher, prefix, uipathfn, opts)
+
+    return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
 
 
 # Calling purge with --all will cause the largefiles to be deleted.
@@ -1737,7 +1792,7 @@
     matcher = kwargs.get('matcher', None)
     # note if this is a partial update
     partial = matcher and not matcher.always()
-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.changing_parents(repo):
         # branch |       |         |
         #  merge | force | partial | action
         # -------+-------+---------+--------------
@@ -1752,15 +1807,15 @@
         #
         # (*) don't care
         #  (*1) deprecated, but used internally (e.g.: "rebase --collapse")
-
-        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-        unsure, s, mtime_boundary = lfdirstate.status(
-            matchmod.always(),
-            subrepos=[],
-            ignored=False,
-            clean=True,
-            unknown=False,
-        )
+        with repo.dirstate.running_status(repo):
+            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
+            unsure, s, mtime_boundary = lfdirstate.status(
+                matchmod.always(),
+                subrepos=[],
+                ignored=False,
+                clean=True,
+                unknown=False,
+            )
         oldclean = set(s.clean)
         pctx = repo[b'.']
         dctx = repo[node]
@@ -1787,7 +1842,14 @@
         # mark all clean largefiles as dirty, just in case the update gets
         # interrupted before largefiles and lfdirstate are synchronized
         for lfile in oldclean:
-            lfdirstate.set_possibly_dirty(lfile)
+            entry = lfdirstate.get_entry(lfile)
+            lfdirstate.hacky_extension_update_file(
+                lfile,
+                wc_tracked=entry.tracked,
+                p1_tracked=entry.p1_tracked,
+                p2_info=entry.p2_info,
+                possibly_dirty=True,
+            )
         lfdirstate.write(repo.currenttransaction())
 
         oldstandins = lfutil.getstandinsstate(repo)
@@ -1798,24 +1860,22 @@
             raise error.ProgrammingError(
                 b'largefiles is not compatible with in-memory merge'
             )
-        with lfdirstate.parentchange():
-            result = orig(repo, node, branchmerge, force, *args, **kwargs)
+        result = orig(repo, node, branchmerge, force, *args, **kwargs)
 
-            newstandins = lfutil.getstandinsstate(repo)
-            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+        newstandins = lfutil.getstandinsstate(repo)
+        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
 
-            # to avoid leaving all largefiles as dirty and thus rehash them, mark
-            # all the ones that didn't change as clean
-            for lfile in oldclean.difference(filelist):
-                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
-            lfdirstate.write(repo.currenttransaction())
+        # to avoid leaving all largefiles as dirty and thus rehash them, mark
+        # all the ones that didn't change as clean
+        for lfile in oldclean.difference(filelist):
+            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
 
-            if branchmerge or force or partial:
-                filelist.extend(s.deleted + s.removed)
+        if branchmerge or force or partial:
+            filelist.extend(s.deleted + s.removed)
 
-            lfcommands.updatelfiles(
-                repo.ui, repo, filelist=filelist, normallookup=partial
-            )
+        lfcommands.updatelfiles(
+            repo.ui, repo, filelist=filelist, normallookup=partial
+        )
 
         return result
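
Both dirstate wrappers added at the top of this file share one shape: remember
any pre-existing sub-dirstate, enter the original context, then enter the
matching context on the largefiles sub-dirstate, restoring the attribute on
the way out. A self-contained sketch of that nesting with toy classes (the
real code wraps dirstate.dirstate and opens the sub-dirstate through
lfutil.openlfdirstate):

    import contextlib

    class ToyDirstate:
        # stand-in for dirstate.dirstate; only the context protocol matters
        def __init__(self, name):
            self.name = name

        @contextlib.contextmanager
        def _changing(self, repo, change_type):
            print('enter', self.name, change_type)
            try:
                yield
            finally:
                print('exit', self.name, change_type)

    @contextlib.contextmanager
    def changing_with_sub(orig, self, repo, change_type, sub_dirstate):
        pre = getattr(self, '_sub_dirstate', None)
        try:
            self._sub_dirstate = sub_dirstate
            with orig(self, repo, change_type):
                with sub_dirstate._changing(repo, change_type):
                    yield
        finally:
            self._sub_dirstate = pre

    main, sub = ToyDirstate('main'), ToyDirstate('largefiles')
    with changing_with_sub(ToyDirstate._changing, main, None, 'parents', sub):
        print('both dirstates are inside the changing context')
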
 
--- a/hgext/largefiles/reposetup.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/largefiles/reposetup.py	Thu Mar 02 22:45:44 2023 +0100
@@ -139,7 +139,7 @@
             except error.LockError:
                 wlock = util.nullcontextmanager()
                 gotlock = False
-            with wlock:
+            with wlock, self.dirstate.running_status(self):
 
                 # First check if paths or patterns were specified on the
                 # command line.  If there were, and they don't match any
@@ -321,6 +321,8 @@
 
                 if gotlock:
                     lfdirstate.write(self.currenttransaction())
+                else:
+                    lfdirstate.invalidate()
 
             self.lfstatus = True
             return scmutil.status(*result)
--- a/hgext/largefiles/storefactory.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/largefiles/storefactory.py	Thu Mar 02 22:45:44 2023 +0100
@@ -36,22 +36,23 @@
                 b'lfpullsource', repo, ui, lfpullsource
             )
         else:
-            path, _branches = urlutil.get_unique_pull_path(
-                b'lfpullsource', repo, ui, lfpullsource
+            path = urlutil.get_unique_pull_path_obj(
+                b'lfpullsource', ui, lfpullsource
             )
 
         # XXX we should not explicitly pass b'default', as this will result in
         # b'default' being returned if no `paths.default` was defined. We
         # should explicitly handle the lack of value instead.
         if repo is None:
-            path, _branches = urlutil.get_unique_pull_path(
-                b'lfs', repo, ui, b'default'
+            path = urlutil.get_unique_pull_path_obj(
+                b'lfs',
+                ui,
+                b'default',
             )
             remote = hg.peer(repo or ui, {}, path)
-        elif path == b'default-push' or path == b'default':
+        elif path.loc == b'default-push' or path.loc == b'default':
             remote = repo
         else:
-            path, _branches = urlutil.parseurl(path)
             remote = hg.peer(repo or ui, {}, path)
 
     # The path could be a scheme so use Mercurial's normal functionality
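
The storefactory hunks replace the (path, branches) tuple API with a path
object whose location is read from .loc. A runnable toy of the shape
difference (FakePath and both helpers are stand-ins; the real ones live in
mercurial.utils.urlutil):

    from collections import namedtuple

    FakePath = namedtuple('FakePath', 'loc')  # stand-in for the path type

    def get_unique_pull_path(name, repo, ui, source):  # old shape (toy)
        return source, []  # (location, branches)

    def get_unique_pull_path_obj(name, ui, source):  # new shape (toy)
        return FakePath(loc=source)

    loc, _branches = get_unique_pull_path(b'x', None, None, b'http://x/r')
    path = get_unique_pull_path_obj(b'x', None, b'http://x/r')
    assert loc == path.loc
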
--- a/hgext/lfs/blobstore.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/lfs/blobstore.py	Thu Mar 02 22:45:44 2023 +0100
@@ -168,12 +168,16 @@
             # producing the response (but the server has no way of telling us
             # that), and we really don't need to try to write the response to
             # the localstore, because it's not going to match the expected.
+            # The server also uses this method to store data uploaded by the
+            # client, so if this happens on the server side, it's possible
+            # that the client crashed or an antivirus interfered with the
+            # upload.
             if content_length is not None and int(content_length) != size:
                 msg = (
                     b"Response length (%d) does not match Content-Length "
-                    b"header (%d): likely server-side crash"
+                    b"header (%d) for %s"
                 )
-                raise LfsRemoteError(_(msg) % (size, int(content_length)))
+                raise LfsRemoteError(_(msg) % (size, int(content_length), oid))
 
             realoid = hex(sha256.digest())
             if realoid != oid:
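
The reworded error above now reports the oid alongside both sizes. The two
integrity checks themselves, reduced to a runnable toy (verify_blob is a
hypothetical name; the real code streams chunks and hashes incrementally):

    import hashlib

    def verify_blob(oid, data, content_length):
        # size against the Content-Length header, then the sha256 digest
        # against the expected oid, as in the hunk above
        if content_length is not None and int(content_length) != len(data):
            msg = ("Response length (%d) does not match Content-Length "
                   "header (%d) for %s")
            raise ValueError(msg % (len(data), int(content_length), oid))
        if hashlib.sha256(data).hexdigest() != oid:
            raise ValueError("blob %s is corrupt" % oid)

    blob = b'hello'
    verify_blob(hashlib.sha256(blob).hexdigest(), blob, str(len(blob)))
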
--- a/hgext/mq.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/mq.py	Thu Mar 02 22:45:44 2023 +0100
@@ -82,7 +82,6 @@
 from mercurial import (
     cmdutil,
     commands,
-    dirstateguard,
     encoding,
     error,
     extensions,
@@ -791,7 +790,10 @@
         if self.added:
             qrepo = self.qrepo()
             if qrepo:
-                qrepo[None].add(f for f in self.added if f not in qrepo[None])
+                with qrepo.wlock(), qrepo.dirstate.changing_files(qrepo):
+                    qrepo[None].add(
+                        f for f in self.added if f not in qrepo[None]
+                    )
             self.added = []
 
     def removeundo(self, repo):
@@ -1082,7 +1084,7 @@
 
             if merge and files:
                 # Mark as removed/merged and update dirstate parent info
-                with repo.dirstate.parentchange():
+                with repo.dirstate.changing_parents(repo):
                     for f in files:
                         repo.dirstate.update_file_p1(f, p1_tracked=True)
                     p1 = repo.dirstate.p1()
@@ -1129,7 +1131,8 @@
         if not keep:
             r = self.qrepo()
             if r:
-                r[None].forget(patches)
+                with r.wlock(), r.dirstate.changing_files(r):
+                    r[None].forget(patches)
             for p in patches:
                 try:
                     os.unlink(self.join(p))
@@ -1153,7 +1156,7 @@
                 sortedseries.append((idx, p))
 
         sortedseries.sort(reverse=True)
-        for (i, p) in sortedseries:
+        for i, p in sortedseries:
             if i != -1:
                 del self.fullseries[i]
             else:
@@ -1177,7 +1180,6 @@
         firstrev = repo[self.applied[0].node].rev()
         patches = []
         for i, rev in enumerate(revs):
-
             if rev < firstrev:
                 raise error.Abort(_(b'revision %d is not managed') % rev)
 
@@ -1465,7 +1467,8 @@
                     p.close()
                     r = self.qrepo()
                     if r:
-                        r[None].add([patchfn])
+                        with r.wlock(), r.dirstate.changing_files(r):
+                            r[None].add([patchfn])
                 except:  # re-raises
                     repo.rollback()
                     raise
@@ -1830,7 +1833,7 @@
                 if keepchanges and tobackup:
                     raise error.Abort(_(b"local changes found, qrefresh first"))
                 self.backup(repo, tobackup)
-                with repo.dirstate.parentchange():
+                with repo.dirstate.changing_parents(repo):
                     for f in a:
                         repo.wvfs.unlinkpath(f, ignoremissing=True)
                         repo.dirstate.update_file(
@@ -1988,73 +1991,67 @@
 
             bmlist = repo[top].bookmarks()
 
-            with repo.dirstate.parentchange():
-                # XXX do we actually need the dirstateguard
-                dsguard = None
-                try:
-                    dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
-                    if diffopts.git or diffopts.upgrade:
-                        copies = {}
-                        for dst in a:
-                            src = repo.dirstate.copied(dst)
-                            # during qfold, the source file for copies may
-                            # be removed. Treat this as a simple add.
-                            if src is not None and src in repo.dirstate:
-                                copies.setdefault(src, []).append(dst)
-                            repo.dirstate.update_file(
-                                dst, p1_tracked=False, wc_tracked=True
+            with repo.dirstate.changing_parents(repo):
+                if diffopts.git or diffopts.upgrade:
+                    copies = {}
+                    for dst in a:
+                        src = repo.dirstate.copied(dst)
+                        # during qfold, the source file for copies may
+                        # be removed. Treat this as a simple add.
+                        if src is not None and src in repo.dirstate:
+                            copies.setdefault(src, []).append(dst)
+                        repo.dirstate.update_file(
+                            dst, p1_tracked=False, wc_tracked=True
+                        )
+                    # remember the copies between patchparent and qtip
+                    for dst in aaa:
+                        src = ctx[dst].copysource()
+                        if src:
+                            copies.setdefault(src, []).extend(
+                                copies.get(dst, [])
                             )
-                        # remember the copies between patchparent and qtip
-                        for dst in aaa:
-                            src = ctx[dst].copysource()
-                            if src:
-                                copies.setdefault(src, []).extend(
-                                    copies.get(dst, [])
-                                )
-                                if dst in a:
-                                    copies[src].append(dst)
-                            # we can't copy a file created by the patch itself
-                            if dst in copies:
-                                del copies[dst]
-                        for src, dsts in copies.items():
-                            for dst in dsts:
-                                repo.dirstate.copy(src, dst)
-                    else:
-                        for dst in a:
-                            repo.dirstate.update_file(
-                                dst, p1_tracked=False, wc_tracked=True
-                            )
-                        # Drop useless copy information
-                        for f in list(repo.dirstate.copies()):
-                            repo.dirstate.copy(None, f)
-                    for f in r:
-                        repo.dirstate.update_file_p1(f, p1_tracked=True)
-                    # if the patch excludes a modified file, mark that
-                    # file with mtime=0 so status can see it.
-                    mm = []
-                    for i in range(len(m) - 1, -1, -1):
-                        if not match1(m[i]):
-                            mm.append(m[i])
-                            del m[i]
-                    for f in m:
-                        repo.dirstate.update_file_p1(f, p1_tracked=True)
-                    for f in mm:
-                        repo.dirstate.update_file_p1(f, p1_tracked=True)
-                    for f in forget:
-                        repo.dirstate.update_file_p1(f, p1_tracked=False)
-
-                    user = ph.user or ctx.user()
-
-                    oldphase = repo[top].phase()
-
-                    # assumes strip can roll itself back if interrupted
-                    repo.setparents(*cparents)
-                    self.applied.pop()
-                    self.applieddirty = True
-                    strip(self.ui, repo, [top], update=False, backup=False)
-                    dsguard.close()
-                finally:
-                    release(dsguard)
+                            if dst in a:
+                                copies[src].append(dst)
+                        # we can't copy a file created by the patch itself
+                        if dst in copies:
+                            del copies[dst]
+                    for src, dsts in copies.items():
+                        for dst in dsts:
+                            repo.dirstate.copy(src, dst)
+                else:
+                    for dst in a:
+                        repo.dirstate.update_file(
+                            dst, p1_tracked=False, wc_tracked=True
+                        )
+                    # Drop useless copy information
+                    for f in list(repo.dirstate.copies()):
+                        repo.dirstate.copy(None, f)
+                for f in r:
+                    repo.dirstate.update_file_p1(f, p1_tracked=True)
+                # if the patch excludes a modified file, mark that
+                # file with mtime=0 so status can see it.
+                mm = []
+                for i in range(len(m) - 1, -1, -1):
+                    if not match1(m[i]):
+                        mm.append(m[i])
+                        del m[i]
+                for f in m:
+                    repo.dirstate.update_file_p1(f, p1_tracked=True)
+                for f in mm:
+                    repo.dirstate.update_file_p1(f, p1_tracked=True)
+                for f in forget:
+                    repo.dirstate.update_file_p1(f, p1_tracked=False)
+
+                user = ph.user or ctx.user()
+
+                oldphase = repo[top].phase()
+
+                # assumes strip can roll itself back if interrupted
+                repo.setparents(*cparents)
+                repo.dirstate.write(repo.currenttransaction())
+                self.applied.pop()
+                self.applieddirty = True
+                strip(self.ui, repo, [top], update=False, backup=False)
 
             try:
                 # might be nice to attempt to roll back strip after this
@@ -2124,8 +2121,9 @@
                 finally:
                     lockmod.release(tr, lock)
             except:  # re-raises
-                ctx = repo[cparents[0]]
-                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
+                with repo.dirstate.changing_parents(repo):
+                    ctx = repo[cparents[0]]
+                    repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                 self.savedirty()
                 self.ui.warn(
                     _(
@@ -2760,18 +2758,19 @@
     r = q.init(repo, create)
     q.savedirty()
     if r:
-        if not os.path.exists(r.wjoin(b'.hgignore')):
-            fp = r.wvfs(b'.hgignore', b'w')
-            fp.write(b'^\\.hg\n')
-            fp.write(b'^\\.mq\n')
-            fp.write(b'syntax: glob\n')
-            fp.write(b'status\n')
-            fp.write(b'guards\n')
-            fp.close()
-        if not os.path.exists(r.wjoin(b'series')):
-            r.wvfs(b'series', b'w').close()
-        r[None].add([b'.hgignore', b'series'])
-        commands.add(ui, r)
+        with r.wlock(), r.dirstate.changing_files(r):
+            if not os.path.exists(r.wjoin(b'.hgignore')):
+                fp = r.wvfs(b'.hgignore', b'w')
+                fp.write(b'^\\.hg\n')
+                fp.write(b'^\\.mq\n')
+                fp.write(b'syntax: glob\n')
+                fp.write(b'status\n')
+                fp.write(b'guards\n')
+                fp.close()
+            if not os.path.exists(r.wjoin(b'series')):
+                r.wvfs(b'series', b'w').close()
+            r[None].add([b'.hgignore', b'series'])
+            commands.add(ui, r)
     return 0
 
 
@@ -2854,16 +2853,17 @@
     # main repo (destination and sources)
     if dest is None:
         dest = hg.defaultdest(source)
-    __, source_path, __ = urlutil.get_clone_path(ui, source)
+    source_path = urlutil.get_clone_path_obj(ui, source)
     sr = hg.peer(ui, opts, source_path)
 
     # patches repo (source only)
     if opts.get(b'patches'):
-        __, patchespath, __ = urlutil.get_clone_path(ui, opts.get(b'patches'))
+        patches_path = urlutil.get_clone_path_obj(ui, opts.get(b'patches'))
     else:
-        patchespath = patchdir(sr)
+        # XXX path: we should turn this into a path object
+        patches_path = patchdir(sr)
     try:
-        hg.peer(ui, opts, patchespath)
+        hg.peer(ui, opts, patches_path)
     except error.RepoError:
         raise error.Abort(
             _(b'versioned patch repository not found (see init --mq)')
@@ -3223,45 +3223,46 @@
         raise error.Abort(_(b'qfold requires at least one patch name'))
     if not q.checktoppatch(repo)[0]:
         raise error.Abort(_(b'no patches applied'))
-    q.checklocalchanges(repo)
-
-    message = cmdutil.logmessage(ui, opts)
-
-    parent = q.lookup(b'qtip')
-    patches = []
-    messages = []
-    for f in files:
-        p = q.lookup(f)
-        if p in patches or p == parent:
-            ui.warn(_(b'skipping already folded patch %s\n') % p)
-        if q.isapplied(p):
-            raise error.Abort(
-                _(b'qfold cannot fold already applied patch %s') % p
-            )
-        patches.append(p)
-
-    for p in patches:
+
+    with repo.wlock():
+        q.checklocalchanges(repo)
+
+        message = cmdutil.logmessage(ui, opts)
+
+        parent = q.lookup(b'qtip')
+        patches = []
+        messages = []
+        for f in files:
+            p = q.lookup(f)
+            if p in patches or p == parent:
+                ui.warn(_(b'skipping already folded patch %s\n') % p)
+            if q.isapplied(p):
+                raise error.Abort(
+                    _(b'qfold cannot fold already applied patch %s') % p
+                )
+            patches.append(p)
+
+        for p in patches:
+            if not message:
+                ph = patchheader(q.join(p), q.plainmode)
+                if ph.message:
+                    messages.append(ph.message)
+            pf = q.join(p)
+            (patchsuccess, files, fuzz) = q.patch(repo, pf)
+            if not patchsuccess:
+                raise error.Abort(_(b'error folding patch %s') % p)
+
         if not message:
-            ph = patchheader(q.join(p), q.plainmode)
-            if ph.message:
-                messages.append(ph.message)
-        pf = q.join(p)
-        (patchsuccess, files, fuzz) = q.patch(repo, pf)
-        if not patchsuccess:
-            raise error.Abort(_(b'error folding patch %s') % p)
-
-    if not message:
-        ph = patchheader(q.join(parent), q.plainmode)
-        message = ph.message
-        for msg in messages:
-            if msg:
-                if message:
-                    message.append(b'* * *')
-                message.extend(msg)
-        message = b'\n'.join(message)
-
-    diffopts = q.patchopts(q.diffopts(), *patches)
-    with repo.wlock():
+            ph = patchheader(q.join(parent), q.plainmode)
+            message = ph.message
+            for msg in messages:
+                if msg:
+                    if message:
+                        message.append(b'* * *')
+                    message.extend(msg)
+            message = b'\n'.join(message)
+
+        diffopts = q.patchopts(q.diffopts(), *patches)
         q.refresh(
             repo,
             msg=message,
@@ -3627,8 +3628,8 @@
     util.rename(q.join(patch), absdest)
     r = q.qrepo()
     if r and patch in r.dirstate:
-        wctx = r[None]
-        with r.wlock():
+        with r.wlock(), r.dirstate.changing_files(r):
+            wctx = r[None]
             if r.dirstate.get_entry(patch).added:
                 r.dirstate.set_untracked(patch)
                 r.dirstate.set_tracked(name)
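
The qfold hunk above (now entirely under wlock) assembles the folded commit
message by joining the parent patch's header with each folded patch's message.
That assembly, isolated into a runnable snippet (the sample messages are made
up):

    message = [b'parent patch description']
    messages = [[b'first folded patch'], [], [b'second folded patch']]
    for msg in messages:
        if msg:
            if message:
                message.append(b'* * *')
            message.extend(msg)
    message = b'\n'.join(message)
    assert message == (
        b'parent patch description\n* * *\nfirst folded patch'
        b'\n* * *\nsecond folded patch'
    )
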
--- a/hgext/narrow/narrowbundle2.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/narrow/narrowbundle2.py	Thu Mar 02 22:45:44 2023 +0100
@@ -261,6 +261,7 @@
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
     if util.safehasattr(repo, 'setnewnarrowpats'):
+        op.gettransaction()
         repo.setnewnarrowpats()
 
 
--- a/hgext/narrow/narrowcommands.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/narrow/narrowcommands.py	Thu Mar 02 22:45:44 2023 +0100
@@ -320,7 +320,7 @@
                 repo.store.markremoved(f)
 
             ui.status(_(b'deleting unwanted files from working copy\n'))
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 narrowspec.updateworkingcopy(repo, assumeclean=True)
                 narrowspec.copytoworkingcopy(repo)
 
@@ -380,7 +380,7 @@
         if ellipsesremote:
             ds = repo.dirstate
             p1, p2 = ds.p1(), ds.p2()
-            with ds.parentchange():
+            with ds.changing_parents(repo):
                 ds.setparents(repo.nullid, repo.nullid)
         if isoldellipses:
             with wrappedextraprepare:
@@ -416,13 +416,15 @@
                     repo, trmanager.transaction, source=b'widen'
                 )
                 # TODO: we should catch error.Abort here
-                bundle2.processbundle(repo, bundle, op=op)
+                bundle2.processbundle(repo, bundle, op=op, remote=remote)
 
         if ellipsesremote:
-            with ds.parentchange():
+            with ds.changing_parents(repo):
                 ds.setparents(p1, p2)
 
-        with repo.transaction(b'widening'), repo.dirstate.parentchange():
+        with repo.transaction(b'widening'), repo.dirstate.changing_parents(
+            repo
+        ):
             repo.setnewnarrowpats()
             narrowspec.updateworkingcopy(repo)
             narrowspec.copytoworkingcopy(repo)
@@ -561,20 +563,9 @@
         or update_working_copy
     )
 
-    oldincludes, oldexcludes = repo.narrowpats
-
-    # filter the user passed additions and deletions into actual additions and
-    # deletions of excludes and includes
-    addedincludes -= oldincludes
-    removedincludes &= oldincludes
-    addedexcludes -= oldexcludes
-    removedexcludes &= oldexcludes
-
-    widening = addedincludes or removedexcludes
-    narrowing = removedincludes or addedexcludes
-
     # Only print the current narrowspec.
     if only_show:
+        oldincludes, oldexcludes = repo.narrowpats
         ui.pager(b'tracked')
         fm = ui.formatter(b'narrow', opts)
         for i in sorted(oldincludes):
@@ -588,28 +579,39 @@
         fm.end()
         return 0
 
-    if update_working_copy:
-        with repo.wlock(), repo.lock(), repo.transaction(
-            b'narrow-wc'
-        ), repo.dirstate.parentchange():
-            narrowspec.updateworkingcopy(repo)
-            narrowspec.copytoworkingcopy(repo)
-        return 0
+    with repo.wlock(), repo.lock():
+        oldincludes, oldexcludes = repo.narrowpats
+
+        # filter the user-passed additions and deletions into actual additions and
+        # deletions of excludes and includes
+        addedincludes -= oldincludes
+        removedincludes &= oldincludes
+        addedexcludes -= oldexcludes
+        removedexcludes &= oldexcludes
+
+        widening = addedincludes or removedexcludes
+        narrowing = removedincludes or addedexcludes
 
-    if not (widening or narrowing or autoremoveincludes):
-        ui.status(_(b"nothing to widen or narrow\n"))
-        return 0
+        if update_working_copy:
+            with repo.transaction(b'narrow-wc'), repo.dirstate.changing_parents(
+                repo
+            ):
+                narrowspec.updateworkingcopy(repo)
+                narrowspec.copytoworkingcopy(repo)
+            return 0
 
-    with repo.wlock(), repo.lock():
+        if not (widening or narrowing or autoremoveincludes):
+            ui.status(_(b"nothing to widen or narrow\n"))
+            return 0
+
         cmdutil.bailifchanged(repo)
 
         # Find the revisions we have in common with the remote. These will
         # be used for finding local-only changes for narrowing. They will
         # also define the set of revisions to update for widening.
-        r = urlutil.get_unique_pull_path(b'tracked', repo, ui, remotepath)
-        url, branches = r
-        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
-        remote = hg.peer(repo, opts, url)
+        path = urlutil.get_unique_pull_path_obj(b'tracked', ui, remotepath)
+        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
+        remote = hg.peer(repo, opts, path)
 
         try:
             # check narrow support before doing anything if widening needs to be
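
The narrowspec filtering that moved under the lock reduces the requested
changes to effective ones with set arithmetic. In isolation (the example
patterns are made up):

    oldincludes = {b'path:a', b'path:b'}
    # requested additions minus what is already included
    addedincludes = {b'path:b', b'path:c'} - oldincludes
    # requested removals intersected with what is actually included
    removedincludes = {b'path:a', b'path:z'} & oldincludes
    assert addedincludes == {b'path:c'}
    assert removedincludes == {b'path:a'}
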
--- a/hgext/narrow/narrowrepo.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/narrow/narrowrepo.py	Thu Mar 02 22:45:44 2023 +0100
@@ -19,8 +19,8 @@
             dirstate = super(narrowrepository, self)._makedirstate()
             return narrowdirstate.wrapdirstate(self, dirstate)
 
-        def peer(self):
-            peer = super(narrowrepository, self).peer()
+        def peer(self, path=None):
+            peer = super(narrowrepository, self).peer(path=path)
             peer._caps.add(wireprototypes.NARROWCAP)
             peer._caps.add(wireprototypes.ELLIPSESCAP)
             return peer
--- a/hgext/notify.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/notify.py	Thu Mar 02 22:45:44 2023 +0100
@@ -450,7 +450,7 @@
         try:
             msg = mail.parsebytes(data)
         except emailerrors.MessageParseError as inst:
-            raise error.Abort(inst)
+            raise error.Abort(stringutil.forcebytestr(inst))
 
         # store sender and subject
         sender = msg['From']
--- a/hgext/phabricator.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/phabricator.py	Thu Mar 02 22:45:44 2023 +0100
@@ -286,9 +286,12 @@
                 import hgdemandimport
 
                 with hgdemandimport.deactivated():
+                    # pytype: disable=import-error
                     import vcr as vcrmod
                     import vcr.stubs as stubs
 
+                    # pytype: enable=import-error
+
                     vcr = vcrmod.VCR(
                         serializer='json',
                         before_record_request=sanitiserequest,
@@ -350,11 +353,14 @@
     """
     flatparams = util.sortdict()
 
-    def process(prefix, obj):
+    def process(prefix: bytes, obj):
         if isinstance(obj, bool):
             obj = {True: b'true', False: b'false'}[obj]  # Python -> PHP form
         lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
+        # .items() will only be called for a dict type
+        # pytype: disable=attribute-error
         items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
+        # pytype: enable=attribute-error
         if items is None:
             flatparams[prefix] = obj
         else:
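
The process() helper annotated above flattens nested parameters into PHP-style
form keys. A runnable approximation (flatten is a hypothetical name; the real
code dispatches on exact types and collects into a util.sortdict):

    def flatten(prefix, obj, out):
        # booleans become b'true'/b'false' (Python -> PHP form)
        if isinstance(obj, bool):
            obj = {True: b'true', False: b'false'}[obj]
        if isinstance(obj, list):
            items = [(b'%d' % k, v) for k, v in enumerate(obj)]
        elif isinstance(obj, dict):
            items = obj.items()
        else:
            out[prefix] = obj  # leaf value
            return
        for k, v in items:
            flatten(prefix + b'[%s]' % k, v, out)

    out = {}
    flatten(b'data', {b'ids': [1, 2], b'draft': True}, out)
    assert out == {
        b'data[ids][0]': 1,
        b'data[ids][1]': 2,
        b'data[draft]': b'true',
    }
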
--- a/hgext/rebase.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/rebase.py	Thu Mar 02 22:45:44 2023 +0100
@@ -30,7 +30,6 @@
     commands,
     copies,
     destutil,
-    dirstateguard,
     error,
     extensions,
     logcmdutil,
@@ -1271,15 +1270,9 @@
         # one transaction here. Otherwise, transactions are obtained when
         # committing each node, which is slower but allows partial success.
         with util.acceptintervention(tr):
-            # Same logic for the dirstate guard, except we don't create one when
-            # rebasing in-memory (it's not needed).
-            dsguard = None
-            if singletr and not rbsrt.inmemory:
-                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
-            with util.acceptintervention(dsguard):
-                rbsrt._performrebase(tr)
-                if not rbsrt.dryrun:
-                    rbsrt._finishrebase()
+            rbsrt._performrebase(tr)
+            if not rbsrt.dryrun:
+                rbsrt._finishrebase()
 
 
 def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
@@ -1500,16 +1493,18 @@
 def commitnode(repo, editor, extra, user, date, commitmsg):
     """Commit the wd changes with parents p1 and p2.
     Return node of committed revision."""
-    dsguard = util.nullcontextmanager()
+    tr = util.nullcontextmanager
     if not repo.ui.configbool(b'rebase', b'singletransaction'):
-        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
-    with dsguard:
+        tr = lambda: repo.transaction(b'rebase')
+    with tr():
         # Commit might fail if unresolved files exist
         newnode = repo.commit(
             text=commitmsg, user=user, date=date, extra=extra, editor=editor
         )
 
-        repo.dirstate.setbranch(repo[newnode].branch())
+        repo.dirstate.setbranch(
+            repo[newnode].branch(), repo.currenttransaction()
+        )
         return newnode
 
 
@@ -1520,12 +1515,14 @@
     p1ctx = repo[p1]
     if wctx.isinmemory():
         wctx.setbase(p1ctx)
+        scope = util.nullcontextmanager
     else:
         if repo[b'.'].rev() != p1:
             repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
             mergemod.clean_update(p1ctx)
         else:
             repo.ui.debug(b" already in destination\n")
+        scope = lambda: repo.dirstate.changing_parents(repo)
         # This is, alas, necessary to invalidate workingctx's manifest cache,
         # as well as other data we litter on it in other places.
         wctx = repo[None]
@@ -1535,26 +1532,27 @@
     if base is not None:
         repo.ui.debug(b"   detach base %d:%s\n" % (base, repo[base]))
 
-    # See explanation in merge.graft()
-    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
-    stats = mergemod._update(
-        repo,
-        rev,
-        branchmerge=True,
-        force=True,
-        ancestor=base,
-        mergeancestor=mergeancestor,
-        labels=[b'dest', b'source', b'parent of source'],
-        wc=wctx,
-    )
-    wctx.setparents(p1ctx.node(), repo[p2].node())
-    if collapse:
-        copies.graftcopies(wctx, ctx, p1ctx)
-    else:
-        # If we're not using --collapse, we need to
-        # duplicate copies between the revision we're
-        # rebasing and its first parent.
-        copies.graftcopies(wctx, ctx, ctx.p1())
+    with scope():
+        # See explanation in merge.graft()
+        mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
+        stats = mergemod._update(
+            repo,
+            rev,
+            branchmerge=True,
+            force=True,
+            ancestor=base,
+            mergeancestor=mergeancestor,
+            labels=[b'dest', b'source', b'parent of source'],
+            wc=wctx,
+        )
+        wctx.setparents(p1ctx.node(), repo[p2].node())
+        if collapse:
+            copies.graftcopies(wctx, ctx, p1ctx)
+        else:
+            # If we're not using --collapse, we need to
+            # duplicate copies between the revision we're
+            # rebasing and its first parent.
+            copies.graftcopies(wctx, ctx, ctx.p1())
 
     if stats.unresolvedcount > 0:
         if wctx.isinmemory():
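
commitnode() now keeps a callable that returns a context manager, so the
single-transaction and per-commit cases share one 'with tr():' statement. A
self-contained sketch of the pattern (pick_scope and open_transaction are
illustrative names):

    import contextlib

    @contextlib.contextmanager
    def open_transaction(name):
        # stand-in for repo.transaction(name)
        print('transaction', name, 'open')
        try:
            yield
        finally:
            print('transaction', name, 'closed')

    def pick_scope(single_transaction):
        if single_transaction:
            return contextlib.nullcontext  # outer transaction already open
        return lambda: open_transaction('rebase')

    with pick_scope(False)():
        print('committing inside our own transaction')
    with pick_scope(True)():
        print('relying on the caller-provided transaction')
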
--- a/hgext/releasenotes.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/releasenotes.py	Thu Mar 02 22:45:44 2023 +0100
@@ -39,7 +39,7 @@
 try:
     # Silence a warning about python-Levenshtein.
     #
-    # We don't need the the performance that much and it get anoying in tests.
+    # We don't need the performance that much and it gets annoying in tests.
     import warnings
 
     with warnings.catch_warnings():
@@ -50,7 +50,7 @@
             module="fuzzywuzzy.fuzz",
         )
 
-        import fuzzywuzzy.fuzz as fuzz
+        import fuzzywuzzy.fuzz as fuzz  # pytype: disable=import-error
 
         fuzz.token_set_ratio
 except ImportError:
--- a/hgext/relink.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/relink.py	Thu Mar 02 22:45:44 2023 +0100
@@ -67,8 +67,8 @@
 
     if origin is None and b'default-relink' in ui.paths:
         origin = b'default-relink'
-    path, __ = urlutil.get_unique_pull_path(b'relink', repo, ui, origin)
-    src = hg.repository(repo.baseui, path)
+    path = urlutil.get_unique_pull_path_obj(b'relink', ui, origin)
+    src = hg.repository(repo.baseui, path.loc)
     ui.status(_(b'relinking %s to %s\n') % (src.store.path, repo.store.path))
     if repo.root == src.root:
         ui.status(_(b'there is nothing to relink\n'))
--- a/hgext/remotefilelog/remotefilelog.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/remotefilelog/remotefilelog.py	Thu Mar 02 22:45:44 2023 +0100
@@ -299,6 +299,7 @@
         deltaprevious=False,
         deltamode=None,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         # we don't use any of these parameters here
         del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
--- a/hgext/remotefilelog/shallowutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/remotefilelog/shallowutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -247,7 +247,7 @@
         index = raw.index(b'\0')
     except ValueError:
         raise BadRemotefilelogHeader(
-            "unexpected remotefilelog header: illegal format"
+            b"unexpected remotefilelog header: illegal format"
         )
     header = raw[:index]
     if header.startswith(b'v'):
@@ -267,7 +267,7 @@
         size = int(header)
     if size is None:
         raise BadRemotefilelogHeader(
-            "unexpected remotefilelog header: no size found"
+            b"unexpected remotefilelog header: no size found"
         )
     return index + 1, size, flags
 
--- a/hgext/schemes.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/schemes.py	Thu Mar 02 22:45:44 2023 +0100
@@ -80,9 +80,25 @@
     def __repr__(self):
         return b'<ShortRepository: %s>' % self.scheme
 
+    def make_peer(self, ui, path, *args, **kwargs):
+        new_url = self.resolve(path.rawloc)
+        path = path.copy(new_raw_location=new_url)
+        cls = hg.peer_schemes.get(path.url.scheme)
+        if cls is not None:
+            return cls.make_peer(ui, path, *args, **kwargs)
+        return None
+
     def instance(self, ui, url, create, intents=None, createopts=None):
         url = self.resolve(url)
-        return hg._peerlookup(url).instance(
+        u = urlutil.url(url)
+        scheme = u.scheme or b'file'
+        if scheme in hg.peer_schemes:
+            cls = hg.peer_schemes[scheme]
+        elif scheme in hg.repo_schemes:
+            cls = hg.repo_schemes[scheme]
+        else:
+            cls = hg.LocalFactory
+        return cls.instance(
             ui, url, create, intents=intents, createopts=createopts
         )
 
@@ -119,24 +135,29 @@
 }
 
 
+def _check_drive_letter(scheme: bytes) -> None:
+    """check if a scheme conflict with a Windows drive letter"""
+    if (
+        pycompat.iswindows
+        and len(scheme) == 1
+        and scheme.isalpha()
+        and os.path.exists(b'%s:\\' % scheme)
+    ):
+        msg = _(b'custom scheme %s:// conflicts with drive letter %s:\\\n')
+        msg %= (scheme, scheme.upper())
+        raise error.Abort(msg)
+
+
 def extsetup(ui):
     schemes.update(dict(ui.configitems(b'schemes')))
     t = templater.engine(templater.parse)
     for scheme, url in schemes.items():
-        if (
-            pycompat.iswindows
-            and len(scheme) == 1
-            and scheme.isalpha()
-            and os.path.exists(b'%s:\\' % scheme)
-        ):
-            raise error.Abort(
-                _(
-                    b'custom scheme %s:// conflicts with drive '
-                    b'letter %s:\\\n'
-                )
-                % (scheme, scheme.upper())
-            )
-        hg.schemes[scheme] = ShortRepository(url, scheme, t)
+        _check_drive_letter(scheme)
+        url_scheme = urlutil.url(url).scheme
+        if url_scheme in hg.peer_schemes:
+            hg.peer_schemes[scheme] = ShortRepository(url, scheme, t)
+        else:
+            hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)
 
     extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
 
@@ -144,7 +165,11 @@
 @command(b'debugexpandscheme', norepo=True)
 def expandscheme(ui, url, **opts):
     """given a repo path, provide the scheme-expanded path"""
-    repo = hg._peerlookup(url)
-    if isinstance(repo, ShortRepository):
-        url = repo.resolve(url)
+    scheme = urlutil.url(url).scheme
+    if scheme in hg.peer_schemes:
+        cls = hg.peer_schemes[scheme]
+    else:
+        cls = hg.repo_schemes.get(scheme)
+    if cls is not None and isinstance(cls, ShortRepository):
+        url = cls.resolve(url)
     ui.write(url + b'\n')
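
extsetup() above now registers each shorthand in peer_schemes or repo_schemes
depending on what its expansion resolves to. A toy dispatch illustrating the
split (the factory strings and URLs are placeholders):

    peer_schemes = {b'http': 'http-peer-factory', b'ssh': 'ssh-peer-factory'}
    repo_schemes = {b'file': 'local-repo-factory'}

    def register(scheme, url):
        # register the shorthand next to whatever its expansion targets
        url_scheme = url.split(b'://', 1)[0]
        target = peer_schemes if url_scheme in peer_schemes else repo_schemes
        target[scheme] = b'short:' + url

    register(b'gh', b'http://github.com/')
    register(b'local', b'file:///srv/repos/')
    assert b'gh' in peer_schemes and b'local' in repo_schemes
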
--- a/hgext/split.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/split.py	Thu Mar 02 22:45:44 2023 +0100
@@ -134,7 +134,7 @@
     # Set working parent to ctx.p1(), and keep working copy as ctx's content
     if ctx.node() != repo.dirstate.p1():
         hg.clean(repo, ctx.node(), show_stats=False)
-    with repo.dirstate.parentchange():
+    with repo.dirstate.changing_parents(repo):
         scmutil.movedirstate(repo, ctx.p1())
 
     # Any modified, added, removed, deleted result means split is incomplete
--- a/hgext/sqlitestore.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/sqlitestore.py	Thu Mar 02 22:45:44 2023 +0100
@@ -80,7 +80,7 @@
 )
 
 try:
-    from mercurial import zstd
+    from mercurial import zstd  # pytype: disable=import-error
 
     zstd.__version__
 except ImportError:
@@ -608,6 +608,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         if nodesorder not in (b'nodes', b'storage', b'linear', None):
             raise error.ProgrammingError(
--- a/hgext/transplant.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/transplant.py	Thu Mar 02 22:45:44 2023 +0100
@@ -817,8 +817,8 @@
 
     sourcerepo = opts.get(b'source')
     if sourcerepo:
-        u = urlutil.get_unique_pull_path(b'transplant', repo, ui, sourcerepo)[0]
-        peer = hg.peer(repo, opts, u)
+        path = urlutil.get_unique_pull_path_obj(b'transplant', ui, sourcerepo)
+        peer = hg.peer(repo, opts, path)
         heads = pycompat.maplist(peer.lookup, opts.get(b'branch', ()))
         target = set(heads)
         for r in revs:
--- a/hgext/uncommit.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/uncommit.py	Thu Mar 02 22:45:44 2023 +0100
@@ -236,7 +236,7 @@
                 # Fully removed the old commit
                 mapping[old.node()] = ()
 
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 scmutil.movedirstate(repo, repo[newid], match)
 
             scmutil.cleanupnodes(repo, mapping, b'uncommit', fixphase=True)
@@ -317,7 +317,7 @@
         newpredctx = repo[newprednode]
         dirstate = repo.dirstate
 
-        with dirstate.parentchange():
+        with dirstate.changing_parents(repo):
             scmutil.movedirstate(repo, newpredctx)
 
         mapping = {curctx.node(): (newprednode,)}
--- a/hgext/win32text.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/hgext/win32text.py	Thu Mar 02 22:45:44 2023 +0100
@@ -216,17 +216,23 @@
 def wrap_revert(orig, repo, ctx, names, uipathfn, actions, *args, **kwargs):
     # reset dirstate cache for file we touch
     ds = repo.dirstate
-    with ds.parentchange():
-        for filename in actions[b'revert'][0]:
-            entry = ds.get_entry(filename)
-            if entry is not None:
-                if entry.p1_tracked:
-                    ds.update_file(
-                        filename,
-                        entry.tracked,
-                        p1_tracked=True,
-                        p2_info=entry.p2_info,
-                    )
+    for filename in actions[b'revert'][0]:
+        entry = ds.get_entry(filename)
+        if entry is not None:
+            if entry.p1_tracked:
+                # If we revert the file, it is possibly dirty. However,
+                # this extension meddles with the file content and therefore
+                # its size. As a result, we cannot simply call
+                # `dirstate.set_possibly_dirty`, as that would not adjust the
+                # expected size of the file.
+                #
+                # At least the quirk is now properly documented.
+                ds.hacky_extension_update_file(
+                    filename,
+                    entry.tracked,
+                    p1_tracked=entry.p1_tracked,
+                    p2_info=entry.p2_info,
+                )
     return orig(repo, ctx, names, uipathfn, actions, *args, **kwargs)
 
 
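The comment added above is the crux of this hunk: win32text rewrites line
endings, so the on-disk size legitimately drifts from the size recorded in the
dirstate, and dirstate.set_possibly_dirty alone would leave that stale expected
size in place. A two-line illustration of the drift:

    # CRLF <-> LF conversion changes the byte size, which is why the
    # dirstate entry's expected size cannot be trusted after conversion.
    lf = b'one\ntwo\n'
    crlf = lf.replace(b'\n', b'\r\n')
    assert len(crlf) == len(lf) + 2  # one extra byte per line
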
--- a/mercurial/archival.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/archival.py	Thu Mar 02 22:45:44 2023 +0100
@@ -154,9 +154,14 @@
                 )
                 self.fileobj = gzfileobj
                 return (
+                    # taropen() wants Literal['a', 'r', 'w', 'x'] for the mode,
+                    # but Literal[] is only available in 3.8+; older versions
+                    # would need the typing_extensions backport.
+                    # pytype: disable=wrong-arg-types
                     tarfile.TarFile.taropen(  # pytype: disable=attribute-error
                         name, pycompat.sysstr(mode), gzfileobj
                     )
+                    # pytype: enable=wrong-arg-types
                 )
             else:
                 try:
--- a/mercurial/bundle2.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/bundle2.py	Thu Mar 02 22:45:44 2023 +0100
@@ -315,8 +315,17 @@
     * a way to construct a bundle response when applicable.
     """
 
-    def __init__(self, repo, transactiongetter, captureoutput=True, source=b''):
+    def __init__(
+        self,
+        repo,
+        transactiongetter,
+        captureoutput=True,
+        source=b'',
+        remote=None,
+    ):
         self.repo = repo
+        # the peer object that produced this bundle, if available
+        self.remote = remote
         self.ui = repo.ui
         self.records = unbundlerecords()
         self.reply = None
@@ -363,7 +372,7 @@
     raise TransactionUnavailable()
 
 
-def applybundle(repo, unbundler, tr, source, url=None, **kwargs):
+def applybundle(repo, unbundler, tr, source, url=None, remote=None, **kwargs):
     # transform me into unbundler.apply() as soon as the freeze is lifted
     if isinstance(unbundler, unbundle20):
         tr.hookargs[b'bundle2'] = b'1'
@@ -371,10 +380,12 @@
             tr.hookargs[b'source'] = source
         if url is not None and b'url' not in tr.hookargs:
             tr.hookargs[b'url'] = url
-        return processbundle(repo, unbundler, lambda: tr, source=source)
+        return processbundle(
+            repo, unbundler, lambda: tr, source=source, remote=remote
+        )
     else:
         # the transactiongetter won't be used, but we might as well set it
-        op = bundleoperation(repo, lambda: tr, source=source)
+        op = bundleoperation(repo, lambda: tr, source=source, remote=remote)
         _processchangegroup(op, unbundler, tr, source, url, **kwargs)
         return op
 
@@ -450,7 +461,14 @@
         )
 
 
-def processbundle(repo, unbundler, transactiongetter=None, op=None, source=b''):
+def processbundle(
+    repo,
+    unbundler,
+    transactiongetter=None,
+    op=None,
+    source=b'',
+    remote=None,
+):
     """This function process a bundle, apply effect to/from a repo
 
     It iterates over each part then searches for and uses the proper handling
@@ -466,7 +484,12 @@
     if op is None:
         if transactiongetter is None:
             transactiongetter = _notransaction
-        op = bundleoperation(repo, transactiongetter, source=source)
+        op = bundleoperation(
+            repo,
+            transactiongetter,
+            source=source,
+            remote=remote,
+        )
     # todo:
     # - replace this is a init function soon.
     # - exception catching
@@ -494,6 +517,10 @@
 
 
 def _processchangegroup(op, cg, tr, source, url, **kwargs):
+    if op.remote is not None and op.remote.path is not None:
+        remote_path = op.remote.path
+        kwargs = kwargs.copy()
+        kwargs['delta_base_reuse_policy'] = remote_path.delta_reuse_policy
     ret = cg.apply(op.repo, tr, source, url, **kwargs)
     op.records.add(
         b'changegroup',
@@ -1938,7 +1965,12 @@
             raise error.Abort(
                 _(b'old bundle types only supports v1 changegroups')
             )
+
+        # HG20 is the only case without 2 values to unpack, and it is
+        # handled above.
+        # pytype: disable=bad-unpacking
         header, comp = bundletypes[bundletype]
+        # pytype: enable=bad-unpacking
+
         if comp not in util.compengines.supportedbundletypes:
             raise error.Abort(_(b'unknown stream compression type: %s') % comp)
         compengine = util.compengines.forbundletype(comp)
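
Taken together, the bundle2 hunks thread the producing peer from applybundle()
through processbundle() into bundleoperation, so that _processchangegroup()
can read the per-path delta_reuse_policy and forward it as
delta_base_reuse_policy. A condensed, self-contained sketch of that plumbing
(toy classes, not the real peer/path objects):

    class Path:
        delta_reuse_policy = 'forced'      # hypothetical policy value

    class Peer:
        path = Path()

    class BundleOperation:
        def __init__(self, remote=None):
            self.remote = remote           # peer that produced the bundle

    def process_changegroup(op, **kwargs):
        # mirror of the new _processchangegroup() logic above
        if op.remote is not None and op.remote.path is not None:
            kwargs = kwargs.copy()
            kwargs['delta_base_reuse_policy'] = op.remote.path.delta_reuse_policy
        return kwargs

    assert process_changegroup(BundleOperation(Peer())) == {
        'delta_base_reuse_policy': 'forced'
    }
    assert process_changegroup(BundleOperation()) == {}
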
--- a/mercurial/bundlecaches.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/bundlecaches.py	Thu Mar 02 22:45:44 2023 +0100
@@ -5,6 +5,10 @@
 
 import collections
 
+from typing import (
+    cast,
+)
+
 from .i18n import _
 
 from .thirdparty import attr
@@ -247,7 +251,7 @@
     # required to apply it. If we see this metadata, compare against what the
     # repo supports and error if the bundle isn't compatible.
     if version == b'packed1' and b'requirements' in params:
-        requirements = set(params[b'requirements'].split(b','))
+        requirements = set(cast(bytes, params[b'requirements']).split(b','))
         missingreqs = requirements - requirementsmod.STREAM_FIXED_REQUIREMENTS
         if missingreqs:
             raise error.UnsupportedBundleSpecification(
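
typing.cast() is a runtime no-op that returns its argument unchanged, so this
hunk changes no behavior; it only tells pytype that params[b'requirements'] is
bytes before .split() is called:

    from typing import cast

    # cast() only narrows the static type; the value passes through as-is.
    assert cast(bytes, b'a,b').split(b',') == [b'a', b'b']
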
--- a/mercurial/bundlerepo.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/bundlerepo.py	Thu Mar 02 22:45:44 2023 +0100
@@ -88,7 +88,7 @@
                     )
 
             if not self.index.has_node(deltabase):
-                raise LookupError(
+                raise error.LookupError(
                     deltabase, self.display_id, _(b'unknown delta base')
                 )
 
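The one-line fix above matters because the builtin LookupError accepts the
same three positional arguments but is a different type from
mercurial.error.LookupError, so callers catching the latter would have missed
it. A small illustration with a stand-in subclass:

    # HgLookupError stands in for mercurial.error.LookupError here.
    class HgLookupError(LookupError):
        def __init__(self, name, index, message):
            self.name, self.index, self.message = name, index, message

    try:
        raise LookupError(b'deadbeef', b'index', 'unknown delta base')
    except HgLookupError:
        caught = True
    except LookupError:
        caught = False
    assert caught is False  # the builtin error slips past the except clause
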
@@ -289,24 +289,28 @@
 
         self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
 
+        # dict with the mapping 'filename' -> position in the changegroup.
+        self._cgfilespos = {}
+        self._bundlefile = None
+        self._cgunpacker = None
         self.tempfile = None
         f = util.posixfile(bundlepath, b"rb")
         bundle = exchange.readbundle(self.ui, f, bundlepath)
 
         if isinstance(bundle, bundle2.unbundle20):
             self._bundlefile = bundle
-            self._cgunpacker = None
 
             cgpart = None
             for part in bundle.iterparts(seekable=True):
-                if part.type == b'changegroup':
+                if part.type == b'phase-heads':
+                    self._handle_bundle2_phase_part(bundle, part)
+                elif part.type == b'changegroup':
                     if cgpart:
                         raise NotImplementedError(
                             b"can't process multiple changegroups"
                         )
                     cgpart = part
-
-                self._handlebundle2part(bundle, part)
+                    self._handle_bundle2_cg_part(bundle, part)
 
             if not cgpart:
                 raise error.Abort(_(b"No changegroups found"))
@@ -319,21 +323,19 @@
             cgpart.seek(0, os.SEEK_SET)
 
         elif isinstance(bundle, changegroup.cg1unpacker):
-            if bundle.compressed():
-                f = self._writetempbundle(
-                    bundle.read, b'.hg10un', header=b'HG10UN'
-                )
-                bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
-
-            self._bundlefile = bundle
-            self._cgunpacker = bundle
+            self._handle_bundle1(bundle, bundlepath)
         else:
             raise error.Abort(
-                _(b'bundle type %s cannot be read') % type(bundle)
+                _(b'bundle type %r cannot be read') % type(bundle)
             )
 
-        # dict with the mapping 'filename' -> position in the changegroup.
-        self._cgfilespos = {}
+    def _handle_bundle1(self, bundle, bundlepath):
+        if bundle.compressed():
+            f = self._writetempbundle(bundle.read, b'.hg10un', header=b'HG10UN')
+            bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
+
+        self._bundlefile = bundle
+        self._cgunpacker = bundle
 
         self.firstnewrev = self.changelog.repotiprev + 1
         phases.retractboundary(
@@ -343,11 +345,20 @@
             [ctx.node() for ctx in self[self.firstnewrev :]],
         )
 
-    def _handlebundle2part(self, bundle, part):
-        if part.type != b'changegroup':
-            return
-
+    def _handle_bundle2_cg_part(self, bundle, part):
+        assert part.type == b'changegroup'
         cgstream = part
+        targetphase = part.params.get(b'targetphase')
+        try:
+            targetphase = int(targetphase)
+        except TypeError:
+            pass
+        if targetphase is None:
+            targetphase = phases.draft
+        if targetphase not in phases.allphases:
+            m = _(b'unsupported targetphase: %d')
+            m %= targetphase
+            raise error.Abort(m)
         version = part.params.get(b'version', b'01')
         legalcgvers = changegroup.supportedincomingversions(self)
         if version not in legalcgvers:
@@ -358,6 +369,21 @@
 
         self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
 
+        self.firstnewrev = self.changelog.repotiprev + 1
+        phases.retractboundary(
+            self,
+            None,
+            targetphase,
+            [ctx.node() for ctx in self[self.firstnewrev :]],
+        )
+
+    def _handle_bundle2_phase_part(self, bundle, part):
+        assert part.type == b'phase-heads'
+
+        unfi = self.unfiltered()
+        headsbyphase = phases.binarydecode(part)
+        phases.updatephases(unfi, lambda: None, headsbyphase)
+
     def _writetempbundle(self, readfn, suffix, header=b''):
         """Write a temporary file to disk"""
         fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
@@ -458,8 +484,8 @@
     def cancopy(self):
         return False
 
-    def peer(self):
-        return bundlepeer(self)
+    def peer(self, path=None):
+        return bundlepeer(self, path=path)
 
     def getcwd(self):
         return encoding.getcwd()  # always outside the repo
--- a/mercurial/cext/bdiff.pyi	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cext/bdiff.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -5,7 +5,7 @@
 
 version: int
 
-def bdiff(a: bytes, b: bytes): bytes
+def bdiff(a: bytes, b: bytes) -> bytes: ...
 def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]: ...
 def fixws(s: bytes, allws: bool) -> bytes: ...
 def splitnewlines(text: bytes) -> List[bytes]: ...
--- a/mercurial/cext/osutil.pyi	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cext/osutil.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -2,6 +2,7 @@
     AnyStr,
     IO,
     List,
+    Optional,
     Sequence,
 )
 
@@ -15,7 +16,7 @@
     st_mtime: int
     st_ctime: int
 
-def listdir(path: bytes, st: bool, skip: bool) -> List[stat]: ...
+def listdir(path: bytes, st: bool, skip: Optional[bool]) -> List[stat]: ...
 def posixfile(name: AnyStr, mode: bytes, buffering: int) -> IO: ...
 def statfiles(names: Sequence[bytes]) -> List[stat]: ...
 def setprocname(name: bytes) -> None: ...
--- a/mercurial/cext/parsers.c	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cext/parsers.c	Thu Mar 02 22:45:44 2023 +0100
@@ -177,7 +177,7 @@
 	        (dirstate_flag_p1_tracked | dirstate_flag_p2_info));
 }
 
-static inline bool dirstate_item_c_merged(dirstateItemObject *self)
+static inline bool dirstate_item_c_modified(dirstateItemObject *self)
 {
 	return ((self->flags & dirstate_flag_wc_tracked) &&
 	        (self->flags & dirstate_flag_p1_tracked) &&
@@ -195,7 +195,7 @@
 {
 	if (dirstate_item_c_removed(self)) {
 		return 'r';
-	} else if (dirstate_item_c_merged(self)) {
+	} else if (dirstate_item_c_modified(self)) {
 		return 'm';
 	} else if (dirstate_item_c_added(self)) {
 		return 'a';
@@ -642,9 +642,9 @@
 	}
 };
 
-static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
+static PyObject *dirstate_item_get_modified(dirstateItemObject *self)
 {
-	if (dirstate_item_c_merged(self)) {
+	if (dirstate_item_c_modified(self)) {
 		Py_RETURN_TRUE;
 	} else {
 		Py_RETURN_FALSE;
@@ -709,7 +709,7 @@
      NULL},
     {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
     {"p2_info", (getter)dirstate_item_get_p2_info, NULL, "p2_info", NULL},
-    {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
+    {"modified", (getter)dirstate_item_get_modified, NULL, "modified", NULL},
     {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
     {"maybe_clean", (getter)dirstate_item_get_maybe_clean, NULL, "maybe_clean",
      NULL},
@@ -1187,7 +1187,7 @@
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 20;
+static const int version = 21;
 
 static void module_init(PyObject *mod)
 {
--- a/mercurial/cext/parsers.pyi	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cext/parsers.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -76,3 +76,7 @@
 
     def insert(self, rev: int) -> None: ...
     def shortest(self, node: bytes) -> int: ...
+
+# The IndexObject type here is defined in C, and there's no type for a buffer
+# return, as of py3.11.  https://github.com/python/typing/issues/593
+def parse_index2(data: object, inline: object, format: int = ...) -> Tuple[object, Optional[Tuple[int, object]]]: ...
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/cext/py.typed	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,1 @@
+partial
--- a/mercurial/cext/revlog.c	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cext/revlog.c	Thu Mar 02 22:45:44 2023 +0100
@@ -1446,16 +1446,25 @@
 static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
 {
 	Py_ssize_t start_rev;
+	Py_ssize_t end_rev;
 	PyObject *cache;
 	Py_ssize_t base;
 	Py_ssize_t rev;
 	PyObject *key = NULL;
 	PyObject *value = NULL;
 	const Py_ssize_t length = index_length(self);
-	if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
+	if (!PyArg_ParseTuple(args, "O!nn", &PyDict_Type, &cache, &start_rev,
+	                      &end_rev)) {
 		return NULL;
 	}
-	for (rev = start_rev; rev < length; rev++) {
+	end_rev += 1;
+	if (end_rev > length) {
+		end_rev = length;
+	}
+	if (start_rev < 0) {
+		start_rev = 0;
+	}
+	for (rev = start_rev; rev < end_rev; rev++) {
 		int issnap;
 		PyObject *allvalues = NULL;
 		issnap = index_issnapshotrev(self, rev);
@@ -1480,7 +1489,7 @@
 		}
 		if (allvalues == NULL) {
 			int r;
-			allvalues = PyList_New(0);
+			allvalues = PySet_New(0);
 			if (!allvalues) {
 				goto bail;
 			}
@@ -1491,7 +1500,7 @@
 			}
 		}
 		value = PyLong_FromSsize_t(rev);
-		if (PyList_Append(allvalues, value)) {
+		if (PySet_Add(allvalues, value)) {
 			goto bail;
 		}
 		Py_CLEAR(key);
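
After this change, index_findsnapshots() takes an inclusive end_rev in
addition to start_rev, clamps both to the index bounds, and fills the cache
with sets of revisions instead of lists, so repeated scans cannot append
duplicates. A pure-Python sketch of the new contract (is_snapshot/base_of are
illustrative callbacks, not the C internals):

    def findsnapshots(cache, start_rev, end_rev, length, is_snapshot, base_of):
        end_rev = min(end_rev + 1, length)   # end_rev is inclusive
        start_rev = max(start_rev, 0)        # clamp negative starts
        for rev in range(start_rev, end_rev):
            if is_snapshot(rev):
                cache.setdefault(base_of(rev), set()).add(rev)

    cache = {}
    findsnapshots(cache, -5, 10, length=4,
                  is_snapshot=lambda r: r % 2 == 0,
                  base_of=lambda r: r - 2)
    assert cache == {-2: {0}, 0: {2}}
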
--- a/mercurial/cffi/bdiff.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cffi/bdiff.py	Thu Mar 02 22:45:44 2023 +0100
@@ -8,6 +8,11 @@
 
 import struct
 
+from typing import (
+    List,
+    Tuple,
+)
+
 from ..pure.bdiff import *
 from . import _bdiff  # pytype: disable=import-error
 
@@ -15,7 +20,7 @@
 lib = _bdiff.lib
 
 
-def blocks(sa, sb):
+def blocks(sa: bytes, sb: bytes) -> List[Tuple[int, int, int, int]]:
     a = ffi.new(b"struct bdiff_line**")
     b = ffi.new(b"struct bdiff_line**")
     ac = ffi.new(b"char[]", str(sa))
@@ -29,7 +34,7 @@
         count = lib.bdiff_diff(a[0], an, b[0], bn, l)
         if count < 0:
             raise MemoryError
-        rl = [None] * count
+        rl = [(0, 0, 0, 0)] * count
         h = l.next
         i = 0
         while h:
@@ -43,7 +48,7 @@
     return rl
 
 
-def bdiff(sa, sb):
+def bdiff(sa: bytes, sb: bytes) -> bytes:
     a = ffi.new(b"struct bdiff_line**")
     b = ffi.new(b"struct bdiff_line**")
     ac = ffi.new(b"char[]", str(sa))
--- a/mercurial/cffi/mpatch.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cffi/mpatch.py	Thu Mar 02 22:45:44 2023 +0100
@@ -6,6 +6,8 @@
 # GNU General Public License version 2 or any later version.
 
 
+from typing import List
+
 from ..pure.mpatch import *
 from ..pure.mpatch import mpatchError  # silence pyflakes
 from . import _mpatch  # pytype: disable=import-error
@@ -26,7 +28,7 @@
     return container[0]
 
 
-def patches(text, bins):
+def patches(text: bytes, bins: List[bytes]) -> bytes:
     lgt = len(bins)
     all = []
     if not lgt:
--- a/mercurial/changegroup.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/changegroup.py	Thu Mar 02 22:45:44 2023 +0100
@@ -105,6 +105,164 @@
                 os.unlink(cleanup)
 
 
+def _dbg_ubdl_line(
+    ui,
+    indent,
+    key,
+    base_value=None,
+    percentage_base=None,
+    percentage_key=None,
+):
+    """Print one line of debug_unbundle_debug_info"""
+    line = b"DEBUG-UNBUNDLING: "
+    line += b' ' * (2 * indent)
+    key += b":"
+    padding = b''
+    if base_value is not None:
+        assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
+        line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
+        if isinstance(base_value, float):
+            line += b"%14.3f seconds" % base_value
+        else:
+            line += b"%10d" % base_value
+            padding = b'            '
+    else:
+        line += key
+
+    if percentage_base is not None:
+        line += padding
+        padding = b''
+        assert base_value is not None
+        percentage = base_value * 100 // percentage_base
+        if percentage_key is not None:
+            line += b" (%3d%% of %s)" % (
+                percentage,
+                percentage_key,
+            )
+        else:
+            line += b" (%3d%%)" % percentage
+
+    line += b'\n'
+    ui.write_err(line)
+
+
+def _sumf(items):
+    # python < 3.8 does not support a `start=0.0` argument to sum
+    # So we have to cheat a bit until we drop support for those version
+    if not items:
+        return 0.0
+    return sum(items)
+
+
+def display_unbundle_debug_info(ui, debug_info):
+    """display an unbundling report from debug information"""
+    cl_info = []
+    mn_info = []
+    fl_info = []
+    _dispatch = [
+        (b'CHANGELOG:', cl_info),
+        (b'MANIFESTLOG:', mn_info),
+        (b'FILELOG:', fl_info),
+    ]
+    for e in debug_info:
+        for prefix, info in _dispatch:
+            if e["target-revlog"].startswith(prefix):
+                info.append(e)
+                break
+        else:
+            assert False, 'unreachable'
+    each_info = [
+        (b'changelog', cl_info),
+        (b'manifests', mn_info),
+        (b'files', fl_info),
+    ]
+
+    # General Revision Counts
+    _dbg_ubdl_line(ui, 0, b'revisions', len(debug_info))
+    for key, info in each_info:
+        if not info:
+            continue
+        _dbg_ubdl_line(ui, 1, key, len(info), len(debug_info))
+
+    # General Time spent
+    all_durations = [e['duration'] for e in debug_info]
+    all_durations.sort()
+    total_duration = _sumf(all_durations)
+    _dbg_ubdl_line(ui, 0, b'total-time', total_duration)
+
+    for key, info in each_info:
+        if not info:
+            continue
+        durations = [e['duration'] for e in info]
+        durations.sort()
+        _dbg_ubdl_line(ui, 1, key, _sumf(durations), total_duration)
+
+    # Count and cache reuse per delta types
+    each_types = {}
+    for key, info in each_info:
+        each_types[key] = types = {
+            b'full': 0,
+            b'full-cached': 0,
+            b'snapshot': 0,
+            b'snapshot-cached': 0,
+            b'delta': 0,
+            b'delta-cached': 0,
+            b'unknown': 0,
+            b'unknown-cached': 0,
+        }
+        for e in info:
+            types[e['type']] += 1
+            if e['using-cached-base']:
+                types[e['type'] + b'-cached'] += 1
+
+    EXPECTED_TYPES = (b'full', b'snapshot', b'delta', b'unknown')
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-count')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+        t = each_types[key]
+        for tn in EXPECTED_TYPES:
+            if t[tn]:
+                tc = tn + b'-cached'
+                _dbg_ubdl_line(ui, 2, tn, t[tn])
+                _dbg_ubdl_line(ui, 3, b'cached', t[tc], t[tn])
+
+    # time perf delta types and reuse
+    each_type_time = {}
+    for key, info in each_info:
+        each_type_time[key] = t = {
+            b'full': [],
+            b'full-cached': [],
+            b'snapshot': [],
+            b'snapshot-cached': [],
+            b'delta': [],
+            b'delta-cached': [],
+            b'unknown': [],
+            b'unknown-cached': [],
+        }
+        for e in info:
+            t[e['type']].append(e['duration'])
+            if e['using-cached-base']:
+                t[e['type'] + b'-cached'].append(e['duration'])
+        for t_key, value in list(t.items()):
+            value.sort()
+            t[t_key] = _sumf(value)
+
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-time')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+        t = each_type_time[key]
+        td = total_duration  # to save space on the next lines
+        for tn in EXPECTED_TYPES:
+            if t[tn]:
+                tc = tn + b'-cached'
+                _dbg_ubdl_line(ui, 2, tn, t[tn], td, b"total")
+                _dbg_ubdl_line(ui, 3, b'cached', t[tc], td, b"total")
+
+
 class cg1unpacker:
     """Unpacker for cg1 changegroup streams.
 
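These reporting helpers only run when the new debug.unbundling-stats config
knob is set (its counterpart debug.bundling-stats gates the symmetric bundling
report further down); both reports go to stderr with the DEBUG-UNBUNDLING: and
DEBUG-BUNDLING: prefixes. A plausible hgrc snippet to enable them, inferred
from the configbool() calls in this file:

    [debug]
    bundling-stats = yes
    unbundling-stats = yes
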
@@ -254,7 +412,16 @@
                     pos = next
             yield closechunk()
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+        delta_base_reuse_policy=None,
+    ):
         self.callback = prog.increment
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
@@ -263,7 +430,14 @@
         self.manifestheader()
         deltas = self.deltaiter()
         storage = repo.manifestlog.getstorage(b'')
-        storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
+        storage.addgroup(
+            deltas,
+            revmap,
+            trp,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
+        )
         prog.complete()
         self.callback = None
 
@@ -276,6 +450,7 @@
         targetphase=phases.draft,
         expectedtotal=None,
         sidedata_categories=None,
+        delta_base_reuse_policy=None,
     ):
         """Add the changegroup returned by source.read() to this repo.
         srctype is a string like 'push', 'pull', or 'unbundle'.  url is
@@ -289,9 +464,19 @@
 
         `sidedata_categories` is an optional set of the remote's sidedata wanted
         categories.
+
+        `delta_base_reuse_policy` is an optional argument; when set to a
+        value, it controls the way the deltas contained in the bundle are
+        reused when applied to the revlog.
+
+        See `DELTA_BASE_REUSE_*` entry in mercurial.revlogutils.constants.
         """
         repo = repo.unfiltered()
 
+        debug_info = None
+        if repo.ui.configbool(b'debug', b'unbundling-stats'):
+            debug_info = []
+
         # Only useful if we're adding sidedata categories. If both peers have
         # the same categories, then we simply don't do anything.
         adding_sidedata = (
@@ -366,6 +551,8 @@
                 alwayscache=True,
                 addrevisioncb=onchangelog,
                 duplicaterevisioncb=ondupchangelog,
+                debug_info=debug_info,
+                delta_base_reuse_policy=delta_base_reuse_policy,
             ):
                 repo.ui.develwarn(
                     b'applied empty changelog from changegroup',
@@ -413,6 +600,8 @@
                 trp,
                 progress,
                 addrevisioncb=on_manifest_rev,
+                debug_info=debug_info,
+                delta_base_reuse_policy=delta_base_reuse_policy,
             )
 
             needfiles = {}
@@ -449,6 +638,8 @@
                 efiles,
                 needfiles,
                 addrevisioncb=on_filelog_rev,
+                debug_info=debug_info,
+                delta_base_reuse_policy=delta_base_reuse_policy,
             )
 
             if sidedata_helpers:
@@ -567,6 +758,8 @@
                     b'changegroup-runhooks-%020i' % clstart,
                     lambda tr: repo._afterlock(runhooks),
                 )
+            if debug_info is not None:
+                display_unbundle_debug_info(repo.ui, debug_info)
         finally:
             repo.ui.flush()
         # never return 0 here:
@@ -626,9 +819,24 @@
         protocol_flags = 0
         return node, p1, p2, deltabase, cs, flags, protocol_flags
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+        delta_base_reuse_policy=None,
+    ):
         super(cg3unpacker, self)._unpackmanifests(
-            repo, revmap, trp, prog, addrevisioncb=addrevisioncb
+            repo,
+            revmap,
+            trp,
+            prog,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         )
         for chunkdata in iter(self.filelogheader, {}):
             # If we get here, there are directory manifests in the changegroup
@@ -636,7 +844,12 @@
             repo.ui.debug(b"adding %s revisions\n" % d)
             deltas = self.deltaiter()
             if not repo.manifestlog.getstorage(d).addgroup(
-                deltas, revmap, trp, addrevisioncb=addrevisioncb
+                deltas,
+                revmap,
+                trp,
+                addrevisioncb=addrevisioncb,
+                debug_info=debug_info,
+                delta_base_reuse_policy=delta_base_reuse_policy,
             ):
                 raise error.Abort(_(b"received dir revlog group is empty"))
 
@@ -869,6 +1082,7 @@
     fullclnodes=None,
     precomputedellipsis=None,
     sidedata_helpers=None,
+    debug_info=None,
 ):
     """Calculate deltas for a set of revisions.
 
@@ -978,6 +1192,7 @@
         assumehaveparentrevisions=not ellipses,
         deltamode=deltamode,
         sidedata_helpers=sidedata_helpers,
+        debug_info=debug_info,
     )
 
     for i, revision in enumerate(revisions):
@@ -1003,6 +1218,187 @@
         progress.complete()
 
 
+def make_debug_info():
+    """ "build a "new" debug_info dictionnary
+
+    That dictionnary can be used to gather information about the bundle process
+    """
+    return {
+        'revision-total': 0,
+        'revision-changelog': 0,
+        'revision-manifest': 0,
+        'revision-files': 0,
+        'file-count': 0,
+        'merge-total': 0,
+        'available-delta': 0,
+        'available-full': 0,
+        'delta-against-prev': 0,
+        'delta-full': 0,
+        'delta-against-p1': 0,
+        'denied-delta-candeltafn': 0,
+        'denied-base-not-available': 0,
+        'reused-storage-delta': 0,
+        'computed-delta': 0,
+    }
+
+
+def merge_debug_info(base, other):
+    """merge the debug information from <other> into <base>
+
+    This function can be used to gather lower-level information into
+    higher-level summaries.
+    """
+    for key in (
+        'revision-total',
+        'revision-changelog',
+        'revision-manifest',
+        'revision-files',
+        'merge-total',
+        'available-delta',
+        'available-full',
+        'delta-against-prev',
+        'delta-full',
+        'delta-against-p1',
+        'denied-delta-candeltafn',
+        'denied-base-not-available',
+        'reused-storage-delta',
+        'computed-delta',
+    ):
+        base[key] += other[key]
+
+
+_KEY_PART_WIDTH = 17
+
+
+def _dbg_bdl_line(
+    ui,
+    indent,
+    key,
+    base_value=None,
+    percentage_base=None,
+    percentage_key=None,
+    percentage_ref=None,
+    extra=None,
+):
+    """Print one line of debug_bundle_debug_info"""
+    line = b"DEBUG-BUNDLING: "
+    line += b' ' * (2 * indent)
+    key += b":"
+    if base_value is not None:
+        assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
+        line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
+        line += b"%10d" % base_value
+    else:
+        line += key
+
+    if percentage_base is not None:
+        assert base_value is not None
+        percentage = base_value * 100 // percentage_base
+        if percentage_key is not None:
+            line += b" (%d%% of %s %d)" % (
+                percentage,
+                percentage_key,
+                percentage_ref,
+            )
+        else:
+            line += b" (%d%%)" % percentage
+
+    if extra:
+        line += b" "
+        line += extra
+
+    line += b'\n'
+    ui.write_err(line)
+
+
+def display_bundling_debug_info(
+    ui,
+    debug_info,
+    cl_debug_info,
+    mn_debug_info,
+    fl_debug_info,
+):
+    """display debug information gathered during a bundling through `ui`"""
+    d = debug_info
+    c = cl_debug_info
+    m = mn_debug_info
+    f = fl_debug_info
+    all_info = [
+        (b"changelog", b"cl", c),
+        (b"manifests", b"mn", m),
+        (b"files", b"fl", f),
+    ]
+    _dbg_bdl_line(ui, 0, b'revisions', d['revision-total'])
+    _dbg_bdl_line(ui, 1, b'changelog', d['revision-changelog'])
+    _dbg_bdl_line(ui, 1, b'manifest', d['revision-manifest'])
+    extra = b'(for %d revlogs)' % d['file-count']
+    _dbg_bdl_line(ui, 1, b'files', d['revision-files'], extra=extra)
+    if d['merge-total']:
+        _dbg_bdl_line(ui, 1, b'merge', d['merge-total'], d['revision-total'])
+    for k, __, v in all_info:
+        if v['merge-total']:
+            _dbg_bdl_line(ui, 2, k, v['merge-total'], v['revision-total'])
+
+    _dbg_bdl_line(ui, 0, b'deltas')
+    _dbg_bdl_line(
+        ui,
+        1,
+        b'from-storage',
+        d['reused-storage-delta'],
+        percentage_base=d['available-delta'],
+        percentage_key=b"available",
+        percentage_ref=d['available-delta'],
+    )
+
+    if d['denied-delta-candeltafn']:
+        _dbg_bdl_line(ui, 2, b'denied-fn', d['denied-delta-candeltafn'])
+    for __, k, v in all_info:
+        if v['denied-delta-candeltafn']:
+            _dbg_bdl_line(ui, 3, k, v['denied-delta-candeltafn'])
+
+    if d['denied-base-not-available']:
+        _dbg_bdl_line(ui, 2, b'denied-nb', d['denied-base-not-available'])
+    for k, __, v in all_info:
+        if v['denied-base-not-available']:
+            _dbg_bdl_line(ui, 3, k, v['denied-base-not-available'])
+
+    if d['computed-delta']:
+        _dbg_bdl_line(ui, 1, b'computed', d['computed-delta'])
+
+    if d['available-full']:
+        _dbg_bdl_line(
+            ui,
+            2,
+            b'full',
+            d['delta-full'],
+            percentage_base=d['available-full'],
+            percentage_key=b"native",
+            percentage_ref=d['available-full'],
+        )
+    for k, __, v in all_info:
+        if v['available-full']:
+            _dbg_bdl_line(
+                ui,
+                3,
+                k,
+                v['delta-full'],
+                percentage_base=v['available-full'],
+                percentage_key=b"native",
+                percentage_ref=v['available-full'],
+            )
+
+    if d['delta-against-prev']:
+        _dbg_bdl_line(ui, 2, b'previous', d['delta-against-prev'])
+    for k, __, v in all_info:
+        if v['delta-against-prev']:
+            _dbg_bdl_line(ui, 3, k, v['delta-against-prev'])
+
+    if d['delta-against-p1']:
+        _dbg_bdl_line(ui, 2, b'parent-1', d['delta-against-p1'])
+    for k, __, v in all_info:
+        if v['delta-against-p1']:
+            _dbg_bdl_line(ui, 3, k, v['delta-against-p1'])
+
+
 class cgpacker:
     def __init__(
         self,
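
merge_debug_info() rolls the per-revlog counters up into the aggregate dict by
plain key-wise addition, which is how the changelog/manifest/filelog
sub-reports feed the totals printed by display_bundling_debug_info(). A
standalone miniature of that roll-up (key set reduced for brevity):

    def make_debug_info():
        return {'revision-total': 0, 'revision-changelog': 0}

    def merge_debug_info(base, other):
        for key in base:
            base[key] += other[key]

    total, cl = make_debug_info(), make_debug_info()
    cl['revision-total'] = 3
    merge_debug_info(total, cl)
    assert total == {'revision-total': 3, 'revision-changelog': 0}
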
@@ -1086,13 +1482,21 @@
             self._verbosenote = lambda s: None
 
     def generate(
-        self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
+        self,
+        commonrevs,
+        clnodes,
+        fastpathlinkrev,
+        source,
+        changelog=True,
     ):
         """Yield a sequence of changegroup byte chunks.
         If changelog is False, changelog data won't be added to changegroup
         """
 
+        debug_info = None
         repo = self._repo
+        if repo.ui.configbool(b'debug', b'bundling-stats'):
+            debug_info = make_debug_info()
         cl = repo.changelog
 
         self._verbosenote(_(b'uncompressed size of bundle content:\n'))
@@ -1107,14 +1511,19 @@
                 # correctly advertise its sidedata categories directly.
                 remote_sidedata = repo._wanted_sidedata
             sidedata_helpers = sidedatamod.get_sidedata_helpers(
-                repo, remote_sidedata
+                repo,
+                remote_sidedata,
             )
 
+        cl_debug_info = None
+        if debug_info is not None:
+            cl_debug_info = make_debug_info()
         clstate, deltas = self._generatechangelog(
             cl,
             clnodes,
             generate=changelog,
             sidedata_helpers=sidedata_helpers,
+            debug_info=cl_debug_info,
         )
         for delta in deltas:
             for chunk in _revisiondeltatochunks(
@@ -1126,6 +1535,9 @@
         close = closechunk()
         size += len(close)
         yield closechunk()
+        if debug_info is not None:
+            merge_debug_info(debug_info, cl_debug_info)
+            debug_info['revision-changelog'] = cl_debug_info['revision-total']
 
         self._verbosenote(_(b'%8.i (changelog)\n') % size)
 
@@ -1133,6 +1545,9 @@
         manifests = clstate[b'manifests']
         changedfiles = clstate[b'changedfiles']
 
+        if debug_info is not None:
+            debug_info['file-count'] = len(changedfiles)
+
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
         # The fastpath is usually safer than the slowpath, because the filelogs
@@ -1156,6 +1571,9 @@
         fnodes = {}  # needed file nodes
 
         size = 0
+        mn_debug_info = None
+        if debug_info is not None:
+            mn_debug_info = make_debug_info()
         it = self.generatemanifests(
             commonrevs,
             clrevorder,
@@ -1165,6 +1583,7 @@
             source,
             clstate[b'clrevtomanifestrev'],
             sidedata_helpers=sidedata_helpers,
+            debug_info=mn_debug_info,
         )
 
         for tree, deltas in it:
@@ -1185,6 +1604,9 @@
             close = closechunk()
             size += len(close)
             yield close
+        if debug_info is not None:
+            merge_debug_info(debug_info, mn_debug_info)
+            debug_info['revision-manifest'] = mn_debug_info['revision-total']
 
         self._verbosenote(_(b'%8.i (manifests)\n') % size)
         yield self._manifestsend
@@ -1199,6 +1621,9 @@
         manifests.clear()
         clrevs = {cl.rev(x) for x in clnodes}
 
+        fl_debug_info = None
+        if debug_info is not None:
+            fl_debug_info = make_debug_info()
         it = self.generatefiles(
             changedfiles,
             commonrevs,
@@ -1208,6 +1633,7 @@
             fnodes,
             clrevs,
             sidedata_helpers=sidedata_helpers,
+            debug_info=fl_debug_info,
         )
 
         for path, deltas in it:
@@ -1230,12 +1656,29 @@
             self._verbosenote(_(b'%8.i  %s\n') % (size, path))
 
         yield closechunk()
+        if debug_info is not None:
+            merge_debug_info(debug_info, fl_debug_info)
+            debug_info['revision-files'] = fl_debug_info['revision-total']
+
+        if debug_info is not None:
+            display_bundling_debug_info(
+                repo.ui,
+                debug_info,
+                cl_debug_info,
+                mn_debug_info,
+                fl_debug_info,
+            )
 
         if clnodes:
             repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
 
     def _generatechangelog(
-        self, cl, nodes, generate=True, sidedata_helpers=None
+        self,
+        cl,
+        nodes,
+        generate=True,
+        sidedata_helpers=None,
+        debug_info=None,
     ):
         """Generate data for changelog chunks.
 
@@ -1332,6 +1775,7 @@
             fullclnodes=self._fullclnodes,
             precomputedellipsis=self._precomputedellipsis,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
         return state, gen
@@ -1346,6 +1790,7 @@
         source,
         clrevtolocalrev,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         """Returns an iterator of changegroup chunks containing manifests.
 
@@ -1444,6 +1889,7 @@
                 fullclnodes=self._fullclnodes,
                 precomputedellipsis=self._precomputedellipsis,
                 sidedata_helpers=sidedata_helpers,
+                debug_info=debug_info,
             )
 
             if not self._oldmatcher.visitdir(store.tree[:-1]):
@@ -1483,6 +1929,7 @@
         fnodes,
         clrevs,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         changedfiles = [
             f
@@ -1578,6 +2025,7 @@
                 fullclnodes=self._fullclnodes,
                 precomputedellipsis=self._precomputedellipsis,
                 sidedata_helpers=sidedata_helpers,
+                debug_info=debug_info,
             )
 
             yield fname, deltas
@@ -1867,7 +2315,12 @@
 
 
 def makechangegroup(
-    repo, outgoing, version, source, fastpath=False, bundlecaps=None
+    repo,
+    outgoing,
+    version,
+    source,
+    fastpath=False,
+    bundlecaps=None,
 ):
     cgstream = makestream(
         repo,
@@ -1917,7 +2370,12 @@
 
     repo.hook(b'preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
-    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
+    return bundler.generate(
+        commonrevs,
+        csets,
+        fastpathlinkrev,
+        source,
+    )
 
 
 def _addchangegroupfiles(
@@ -1928,6 +2386,8 @@
     expectedfiles,
     needfiles,
     addrevisioncb=None,
+    debug_info=None,
+    delta_base_reuse_policy=None,
 ):
     revisions = 0
     files = 0
@@ -1948,6 +2408,8 @@
                 revmap,
                 trp,
                 addrevisioncb=addrevisioncb,
+                debug_info=debug_info,
+                delta_base_reuse_policy=delta_base_reuse_policy,
             )
             if not added:
                 raise error.Abort(_(b"received file revlog group is empty"))
--- a/mercurial/cmdutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/cmdutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -8,9 +8,19 @@
 
 import copy as copymod
 import errno
+import functools
 import os
 import re
 
+from typing import (
+    Any,
+    AnyStr,
+    Dict,
+    Iterable,
+    Optional,
+    cast,
+)
+
 from .i18n import _
 from .node import (
     hex,
@@ -29,7 +39,6 @@
     changelog,
     copies,
     crecord as crecordmod,
-    dirstateguard,
     encoding,
     error,
     formatter,
@@ -65,14 +74,10 @@
 )
 
 if pycompat.TYPE_CHECKING:
-    from typing import (
-        Any,
-        Dict,
+    from . import (
+        ui as uimod,
     )
 
-    for t in (Any, Dict):
-        assert t
-
 stringio = util.stringio
 
 # templates of common command options
@@ -269,13 +274,16 @@
 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
 
 
-def check_at_most_one_arg(opts, *args):
+def check_at_most_one_arg(
+    opts: Dict[AnyStr, Any],
+    *args: AnyStr,
+) -> Optional[AnyStr]:
     """abort if more than one of the arguments are in opts
 
     Returns the unique argument or None if none of them were specified.
     """
 
-    def to_display(name):
+    def to_display(name: AnyStr) -> bytes:
         return pycompat.sysbytes(name).replace(b'_', b'-')
 
     previous = None
@@ -290,7 +298,11 @@
     return previous
 
 
-def check_incompatible_arguments(opts, first, others):
+def check_incompatible_arguments(
+    opts: Dict[AnyStr, Any],
+    first: AnyStr,
+    others: Iterable[AnyStr],
+) -> None:
     """abort if the first argument is given along with any of the others
 
     Unlike check_at_most_one_arg(), `others` are not mutually exclusive
@@ -300,7 +312,7 @@
         check_at_most_one_arg(opts, first, other)
 
 
-def resolve_commit_options(ui, opts):
+def resolve_commit_options(ui: "uimod.ui", opts: Dict[str, Any]) -> bool:
     """modify commit options dict to handle related options
 
     The return value indicates that ``rewrite.update-timestamp`` is the reason
@@ -327,7 +339,7 @@
     return datemaydiffer
 
 
-def check_note_size(opts):
+def check_note_size(opts: Dict[str, Any]) -> None:
     """make sure note is of valid format"""
 
     note = opts.get('note')
@@ -429,6 +441,227 @@
     return newchunks, newopts
 
 
+def _record(
+    ui,
+    repo,
+    message,
+    match,
+    opts,
+    commitfunc,
+    backupall,
+    filterfn,
+    pats,
+):
+    """This is generic record driver.
+
+    Its job is to interactively filter local changes, and
+    accordingly prepare working directory into a state in which the
+    job can be delegated to a non-interactive commit command such as
+    'commit' or 'qrefresh'.
+
+    After the actual job is done by non-interactive command, the
+    working directory is restored to its original state.
+
+    In the end we'll record interesting changes, and everything else
+    will be left in place, so the user can continue working.
+    """
+    assert repo.currentwlock() is not None
+    if not opts.get(b'interactive-unshelve'):
+        checkunfinished(repo, commit=True)
+    wctx = repo[None]
+    merge = len(wctx.parents()) > 1
+    if merge:
+        raise error.InputError(
+            _(b'cannot partially commit a merge ' b'(use "hg commit" instead)')
+        )
+
+    def fail(f, msg):
+        raise error.InputError(b'%s: %s' % (f, msg))
+
+    force = opts.get(b'force')
+    if not force:
+        match = matchmod.badmatch(match, fail)
+
+    status = repo.status(match=match)
+
+    overrides = {(b'ui', b'commitsubrepos'): True}
+
+    with repo.ui.configoverride(overrides, b'record'):
+        # subrepoutil.precommit() modifies the status
+        tmpstatus = scmutil.status(
+            copymod.copy(status.modified),
+            copymod.copy(status.added),
+            copymod.copy(status.removed),
+            copymod.copy(status.deleted),
+            copymod.copy(status.unknown),
+            copymod.copy(status.ignored),
+            copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
+        )
+
+        # Force allows -X subrepo to skip the subrepo.
+        subs, commitsubs, newstate = subrepoutil.precommit(
+            repo.ui, wctx, tmpstatus, match, force=True
+        )
+        for s in subs:
+            if s in commitsubs:
+                dirtyreason = wctx.sub(s).dirtyreason(True)
+                raise error.Abort(dirtyreason)
+
+    if not force:
+        repo.checkcommitpatterns(wctx, match, status, fail)
+    diffopts = patch.difffeatureopts(
+        ui,
+        opts=opts,
+        whitespace=True,
+        section=b'commands',
+        configprefix=b'commit.interactive.',
+    )
+    diffopts.nodates = True
+    diffopts.git = True
+    diffopts.showfunc = True
+    originaldiff = patch.diff(repo, changes=status, opts=diffopts)
+    original_headers = patch.parsepatch(originaldiff)
+    match = scmutil.match(repo[None], pats)
+
+    # 1. filter patch, since we are intending to apply subset of it
+    try:
+        chunks, newopts = filterfn(ui, original_headers, match)
+    except error.PatchParseError as err:
+        raise error.InputError(_(b'error parsing patch: %s') % err)
+    except error.PatchApplicationError as err:
+        raise error.StateError(_(b'error applying patch: %s') % err)
+    opts.update(newopts)
+
+    # We need to keep a backup of files that have been newly added and
+    # modified during the recording process because there is a previous
+    # version without the edit in the workdir. We also will need to restore
+    # files that were the sources of renames so that the patch application
+    # works.
+    newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks)
+    contenders = set()
+    for h in chunks:
+        if isheader(h):
+            contenders.update(set(h.files()))
+
+    changed = status.modified + status.added + status.removed
+    newfiles = [f for f in changed if f in contenders]
+    if not newfiles:
+        ui.status(_(b'no changes to record\n'))
+        return 0
+
+    modified = set(status.modified)
+
+    # 2. backup changed files, so we can restore them in the end
+
+    if backupall:
+        tobackup = changed
+    else:
+        tobackup = [
+            f
+            for f in newfiles
+            if f in modified or f in newlyaddedandmodifiedfiles
+        ]
+    backups = {}
+    if tobackup:
+        backupdir = repo.vfs.join(b'record-backups')
+        try:
+            os.mkdir(backupdir)
+        except FileExistsError:
+            pass
+    try:
+        # backup continues
+        for f in tobackup:
+            fd, tmpname = pycompat.mkstemp(
+                prefix=os.path.basename(f) + b'.', dir=backupdir
+            )
+            os.close(fd)
+            ui.debug(b'backup %r as %r\n' % (f, tmpname))
+            util.copyfile(repo.wjoin(f), tmpname, copystat=True)
+            backups[f] = tmpname
+
+        fp = stringio()
+        for c in chunks:
+            fname = c.filename()
+            if fname in backups:
+                c.write(fp)
+        dopatch = fp.tell()
+        fp.seek(0)
+
+        # 2.5 optionally review / modify patch in text editor
+        if opts.get(b'review', False):
+            patchtext = (
+                crecordmod.diffhelptext + crecordmod.patchhelptext + fp.read()
+            )
+            reviewedpatch = ui.edit(
+                patchtext, b"", action=b"diff", repopath=repo.path
+            )
+            fp.truncate(0)
+            fp.write(reviewedpatch)
+            fp.seek(0)
+
+        [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
+        # 3a. apply filtered patch to clean repo  (clean)
+        if backups:
+            m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
+            mergemod.revert_to(repo[b'.'], matcher=m)
+
+        # 3b. (apply)
+        if dopatch:
+            try:
+                ui.debug(b'applying patch\n')
+                ui.debug(fp.getvalue())
+                patch.internalpatch(ui, repo, fp, 1, eolmode=None)
+            except error.PatchParseError as err:
+                raise error.InputError(pycompat.bytestr(err))
+            except error.PatchApplicationError as err:
+                raise error.StateError(pycompat.bytestr(err))
+        del fp
+
+        # 4. We prepared working directory according to filtered
+        #    patch. Now is the time to delegate the job to
+        #    commit/qrefresh or the like!
+
+        # Make all of the pathnames absolute.
+        newfiles = [repo.wjoin(nf) for nf in newfiles]
+        return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
+    finally:
+        # 5. finally restore backed-up files
+        try:
+            dirstate = repo.dirstate
+            for realname, tmpname in backups.items():
+                ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
+
+                if dirstate.get_entry(realname).maybe_clean:
+                    # without normallookup, restoring timestamp
+                    # may cause partially committed files
+                    # to be treated as unmodified
+
+                    # XXX-PENDINGCHANGE: We should clarify the context in
+                    # which this function is called to make sure it is
+                    # already called within a `pendingchange`. However, we
+                    # are taking a shortcut here in order to be able to
+                    # quickly deprecate the older API.
+                    with dirstate.changing_parents(repo):
+                        dirstate.update_file(
+                            realname,
+                            p1_tracked=True,
+                            wc_tracked=True,
+                            possibly_dirty=True,
+                        )
+
+                # copystat=True here and above are a hack to trick any
+                # editors that have f open that we haven't modified them.
+                #
+                # Also note that this is racy, as an editor could notice the
+                # file's mtime before we've finished writing it.
+                util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
+                os.unlink(tmpname)
+            if tobackup:
+                os.rmdir(backupdir)
+        except OSError:
+            pass
+
+
 def dorecord(
     ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
 ):
@@ -444,225 +677,15 @@
     if not opts.get(b'user'):
         ui.username()  # raise exception, username not provided
 
-    def recordfunc(ui, repo, message, match, opts):
-        """This is generic record driver.
-
-        Its job is to interactively filter local changes, and
-        accordingly prepare working directory into a state in which the
-        job can be delegated to a non-interactive commit command such as
-        'commit' or 'qrefresh'.
-
-        After the actual job is done by non-interactive command, the
-        working directory is restored to its original state.
-
-        In the end we'll record interesting changes, and everything else
-        will be left in place, so the user can continue working.
-        """
-        if not opts.get(b'interactive-unshelve'):
-            checkunfinished(repo, commit=True)
-        wctx = repo[None]
-        merge = len(wctx.parents()) > 1
-        if merge:
-            raise error.InputError(
-                _(
-                    b'cannot partially commit a merge '
-                    b'(use "hg commit" instead)'
-                )
-            )
-
-        def fail(f, msg):
-            raise error.InputError(b'%s: %s' % (f, msg))
-
-        force = opts.get(b'force')
-        if not force:
-            match = matchmod.badmatch(match, fail)
-
-        status = repo.status(match=match)
-
-        overrides = {(b'ui', b'commitsubrepos'): True}
-
-        with repo.ui.configoverride(overrides, b'record'):
-            # subrepoutil.precommit() modifies the status
-            tmpstatus = scmutil.status(
-                copymod.copy(status.modified),
-                copymod.copy(status.added),
-                copymod.copy(status.removed),
-                copymod.copy(status.deleted),
-                copymod.copy(status.unknown),
-                copymod.copy(status.ignored),
-                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
-            )
-
-            # Force allows -X subrepo to skip the subrepo.
-            subs, commitsubs, newstate = subrepoutil.precommit(
-                repo.ui, wctx, tmpstatus, match, force=True
-            )
-            for s in subs:
-                if s in commitsubs:
-                    dirtyreason = wctx.sub(s).dirtyreason(True)
-                    raise error.Abort(dirtyreason)
-
-        if not force:
-            repo.checkcommitpatterns(wctx, match, status, fail)
-        diffopts = patch.difffeatureopts(
-            ui,
-            opts=opts,
-            whitespace=True,
-            section=b'commands',
-            configprefix=b'commit.interactive.',
-        )
-        diffopts.nodates = True
-        diffopts.git = True
-        diffopts.showfunc = True
-        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
-        original_headers = patch.parsepatch(originaldiff)
-        match = scmutil.match(repo[None], pats)
-
-        # 1. filter patch, since we are intending to apply subset of it
-        try:
-            chunks, newopts = filterfn(ui, original_headers, match)
-        except error.PatchParseError as err:
-            raise error.InputError(_(b'error parsing patch: %s') % err)
-        except error.PatchApplicationError as err:
-            raise error.StateError(_(b'error applying patch: %s') % err)
-        opts.update(newopts)
-
-        # We need to keep a backup of files that have been newly added and
-        # modified during the recording process because there is a previous
-        # version without the edit in the workdir. We also will need to restore
-        # files that were the sources of renames so that the patch application
-        # works.
-        newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks)
-        contenders = set()
-        for h in chunks:
-            if isheader(h):
-                contenders.update(set(h.files()))
-
-        changed = status.modified + status.added + status.removed
-        newfiles = [f for f in changed if f in contenders]
-        if not newfiles:
-            ui.status(_(b'no changes to record\n'))
-            return 0
-
-        modified = set(status.modified)
-
-        # 2. backup changed files, so we can restore them in the end
-
-        if backupall:
-            tobackup = changed
-        else:
-            tobackup = [
-                f
-                for f in newfiles
-                if f in modified or f in newlyaddedandmodifiedfiles
-            ]
-        backups = {}
-        if tobackup:
-            backupdir = repo.vfs.join(b'record-backups')
-            try:
-                os.mkdir(backupdir)
-            except FileExistsError:
-                pass
-        try:
-            # backup continues
-            for f in tobackup:
-                fd, tmpname = pycompat.mkstemp(
-                    prefix=os.path.basename(f) + b'.', dir=backupdir
-                )
-                os.close(fd)
-                ui.debug(b'backup %r as %r\n' % (f, tmpname))
-                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
-                backups[f] = tmpname
-
-            fp = stringio()
-            for c in chunks:
-                fname = c.filename()
-                if fname in backups:
-                    c.write(fp)
-            dopatch = fp.tell()
-            fp.seek(0)
-
-            # 2.5 optionally review / modify patch in text editor
-            if opts.get(b'review', False):
-                patchtext = (
-                    crecordmod.diffhelptext
-                    + crecordmod.patchhelptext
-                    + fp.read()
-                )
-                reviewedpatch = ui.edit(
-                    patchtext, b"", action=b"diff", repopath=repo.path
-                )
-                fp.truncate(0)
-                fp.write(reviewedpatch)
-                fp.seek(0)
-
-            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
-            # 3a. apply filtered patch to clean repo  (clean)
-            if backups:
-                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
-                mergemod.revert_to(repo[b'.'], matcher=m)
-
-            # 3b. (apply)
-            if dopatch:
-                try:
-                    ui.debug(b'applying patch\n')
-                    ui.debug(fp.getvalue())
-                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
-                except error.PatchParseError as err:
-                    raise error.InputError(pycompat.bytestr(err))
-                except error.PatchApplicationError as err:
-                    raise error.StateError(pycompat.bytestr(err))
-            del fp
-
-            # 4. We prepared working directory according to filtered
-            #    patch. Now is the time to delegate the job to
-            #    commit/qrefresh or the like!
-
-            # Make all of the pathnames absolute.
-            newfiles = [repo.wjoin(nf) for nf in newfiles]
-            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
-        finally:
-            # 5. finally restore backed-up files
-            try:
-                dirstate = repo.dirstate
-                for realname, tmpname in backups.items():
-                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))
-
-                    if dirstate.get_entry(realname).maybe_clean:
-                        # without normallookup, restoring timestamp
-                        # may cause partially committed files
-                        # to be treated as unmodified
-
-                        # XXX-PENDINGCHANGE: We should clarify the context in
-                        # which this function is called  to make sure it
-                        # already called within a `pendingchange`, However we
-                        # are taking a shortcut here in order to be able to
-                        # quickly deprecated the older API.
-                        with dirstate.parentchange():
-                            dirstate.update_file(
-                                realname,
-                                p1_tracked=True,
-                                wc_tracked=True,
-                                possibly_dirty=True,
-                            )
-
-                    # copystat=True here and above are a hack to trick any
-                    # editors that have f open that we haven't modified them.
-                    #
-                    # Also note that this racy as an editor could notice the
-                    # file's mtime before we've finished writing it.
-                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
-                    os.unlink(tmpname)
-                if tobackup:
-                    os.rmdir(backupdir)
-            except OSError:
-                pass
-
-    def recordinwlock(ui, repo, message, match, opts):
-        with repo.wlock():
-            return recordfunc(ui, repo, message, match, opts)
-
-    return commit(ui, repo, recordinwlock, pats, opts)
+    func = functools.partial(
+        _record,
+        commitfunc=commitfunc,
+        backupall=backupall,
+        filterfn=filterfn,
+        pats=pats,
+    )
+
+    return commit(ui, repo, func, pats, opts)
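(The closure-based `recordfunc`/`recordinwlock` pair is replaced by a module-level `_record` pre-bound with `functools.partial`; a partial forwards call-time positional arguments and pre-binds keyword ones, so `commit` can keep invoking the callback as `func(ui, repo, message, matcher, opts)`. A minimal sketch of the pattern, with generic names rather than the actual `_record` signature:

    import functools

    def record(ui, repo, message, match, opts, commitfunc=None, pats=None):
        # stand-in for _record: positional args come from the caller,
        # keyword args were pre-bound when the partial was built
        return (ui, repo, message, match, opts, commitfunc, pats)

    func = functools.partial(record, commitfunc="commitfunc", pats=["glob:*.py"])
    assert func("ui", "repo", "msg", "match", {})[5] == "commitfunc"
)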
 
 
 class dirnode:
@@ -1115,12 +1138,12 @@
         ctx.sub(s).bailifchanged(hint=hint)
 
 
-def logmessage(ui, opts):
+def logmessage(ui: "uimod.ui", opts: Dict[bytes, Any]) -> Optional[bytes]:
     """get the log message according to -m and -l option"""
 
     check_at_most_one_arg(opts, b'message', b'logfile')
 
-    message = opts.get(b'message')
+    message = cast(Optional[bytes], opts.get(b'message'))
     logfile = opts.get(b'logfile')
 
     if not message and logfile:
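(The type annotations introduced throughout this change rely on `typing.cast`, which is a no-op at runtime and only narrows the type that checkers such as pytype assume for a value pulled out of a loosely typed options dict. A small illustration:

    from typing import Any, Dict, Optional, cast

    opts: Dict[bytes, Any] = {b'message': b'fix a bug'}
    # cast() returns its argument unchanged; it merely asserts the type
    message = cast(Optional[bytes], opts.get(b'message'))
    assert message == b'fix a bug'
)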
@@ -1465,7 +1488,7 @@
     return openstorage(repo, cmd, file_, opts, returnrevlog=True)
 
 
-def copy(ui, repo, pats, opts, rename=False):
+def copy(ui, repo, pats, opts: Dict[bytes, Any], rename=False):
     check_incompatible_arguments(opts, b'forget', [b'dry_run'])
 
     # called with the repo lock held
@@ -1532,7 +1555,7 @@
                 new_node = mem_ctx.commit()
 
                 if repo.dirstate.p1() == ctx.node():
-                    with repo.dirstate.parentchange():
+                    with repo.dirstate.changing_parents(repo):
                         scmutil.movedirstate(repo, repo[new_node])
                 replacements = {ctx.node(): [new_node]}
                 scmutil.cleanupnodes(
@@ -1625,7 +1648,7 @@
             new_node = mem_ctx.commit()
 
             if repo.dirstate.p1() == ctx.node():
-                with repo.dirstate.parentchange():
+                with repo.dirstate.changing_parents(repo):
                     scmutil.movedirstate(repo, repo[new_node])
             replacements = {ctx.node(): [new_node]}
             scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
@@ -2008,7 +2031,9 @@
             repo.setparents(p1.node(), p2.node())
 
         if opts.get(b'exact') or importbranch:
-            repo.dirstate.setbranch(branch or b'default')
+            repo.dirstate.setbranch(
+                branch or b'default', repo.currenttransaction()
+            )
 
         partial = opts.get(b'partial', False)
         files = set()
@@ -2778,7 +2803,7 @@
                 basefm,
                 fntemplate,
                 subprefix,
-                **pycompat.strkwargs(opts)
+                **pycompat.strkwargs(opts),
             ):
                 err = 0
         except error.RepoLookupError:
@@ -2789,29 +2814,135 @@
     return err
 
 
+class _AddRemoveContext:
+    """a small (hacky) context to deal with lazy opening of context
+
+    This is to be used in the `commit` function right below. This deals with
+    lazily open a `changing_files` context inside a `transaction` that span the
+    full commit operation.
+
+    We need :
+    - a `changing_files` context to wrap the dirstate change within the
+      "addremove" operation,
+    - a transaction to make sure these change are not written right after the
+      addremove, but when the commit operation succeed.
+
+    However it get complicated because:
+    - opening a transaction "this early" shuffle hooks order, especially the
+      `precommit` one happening after the `pretxtopen` one which I am not too
+      enthusiastic about.
+    - the `mq` extensions + the `record` extension stacks many layers of call
+      to implement `qrefresh --interactive` and this result with `mq` calling a
+      `strip` in the middle of this function. Which prevent the existence of
+      transaction wrapping all of its function code. (however, `qrefresh` never
+      call the `addremove` bits.
+    - the largefile extensions (and maybe other extensions?) wraps `addremove`
+      so slicing `addremove` in smaller bits is a complex endeavour.
+
+    So I eventually took a this shortcut that open the transaction if we
+    actually needs it, not disturbing much of the rest of the code.
+
+    It will result in some hooks order change for `hg commit --addremove`,
+    however it seems a corner case enough to ignore that for now (hopefully).
+
+    Notes that None of the above problems seems insurmountable, however I have
+    been fighting with this specific piece of code for a couple of day already
+    and I need a solution to keep moving forward on the bigger work around
+    `changing_files` context that is being introduced at the same time as this
+    hack.
+
+    Each problem seems to have a solution:
+    - the hook order issue could be solved by refactoring the many-layer stack
+      that currently composes a commit and calling them earlier,
+    - the mq issue could be solved by refactoring `mq` so that the final strip
+      is done after transaction closure. Be warned that the mq code is quite
+      antic however.
+    - large-file could be reworked in parallel of the `addremove` to be
+      friendlier to this.
+
+    However each of these tasks are too much a diversion right now. In addition
+    they will be much easier to undertake when the `changing_files` dust has
+    settled."""
+
+    def __init__(self, repo):
+        self._repo = repo
+        self._transaction = None
+        self._dirstate_context = None
+        self._state = None
+
+    def __enter__(self):
+        assert self._state is None
+        self._state = True
+        return self
+
+    def open_transaction(self):
+        """open a `transaction` and `changing_files` context
+
+        Call this when you know that changes to the dirstate will be needed
+        and the transaction therefore has to be opened early.
+
+        This will also open the dirstate `changing_files` context, so you
+        should call `close_dirstate_context` once the dirstate changes are
+        done.
+        """
+        assert self._state is not None
+        if self._transaction is None:
+            self._transaction = self._repo.transaction(b'commit')
+            self._transaction.__enter__()
+        if self._dirstate_context is None:
+            self._dirstate_context = self._repo.dirstate.changing_files(
+                self._repo
+            )
+            self._dirstate_context.__enter__()
+
+    def close_dirstate_context(self):
+        """close the change_files if any
+
+        Call this after the (potential) `open_transaction` call to close the
+        (potential) changing_files context.
+        """
+        if self._dirstate_context is not None:
+            self._dirstate_context.__exit__(None, None, None)
+            self._dirstate_context = None
+
+    def __exit__(self, *args):
+        if self._dirstate_context is not None:
+            self._dirstate_context.__exit__(*args)
+        if self._transaction is not None:
+            self._transaction.__exit__(*args)
+
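(The essence of the class above is a context manager that enters inner contexts only on demand and replays `__exit__` on whatever it actually opened. A reduced, generic sketch of that shape, illustrative only, with a hypothetical inner context standing in for the real transaction and dirstate APIs:

    import contextlib

    class LazyContext:
        def __init__(self, factory):
            self._factory = factory  # callable returning a context manager
            self._inner = None

        def __enter__(self):
            return self

        def open_inner(self):
            # enter the inner context the first time it is actually needed
            if self._inner is None:
                self._inner = self._factory()
                self._inner.__enter__()

        def __exit__(self, *args):
            if self._inner is not None:
                self._inner.__exit__(*args)

    with LazyContext(contextlib.nullcontext) as c:
        c.open_inner()  # only called when a dirstate change is coming
)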
+
 def commit(ui, repo, commitfunc, pats, opts):
     '''commit the specified files or all outstanding changes'''
     date = opts.get(b'date')
     if date:
         opts[b'date'] = dateutil.parsedate(date)
-    message = logmessage(ui, opts)
-    matcher = scmutil.match(repo[None], pats, opts)
-
-    dsguard = None
-    # extract addremove carefully -- this function can be called from a command
-    # that doesn't support addremove
-    if opts.get(b'addremove'):
-        dsguard = dirstateguard.dirstateguard(repo, b'commit')
-    with dsguard or util.nullcontextmanager():
-        if dsguard:
-            relative = scmutil.anypats(pats, opts)
-            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
-                raise error.Abort(
-                    _(b"failed to mark all new/missing files as added/removed")
+
+    with repo.wlock(), repo.lock():
+        message = logmessage(ui, opts)
+        matcher = scmutil.match(repo[None], pats, opts)
+
+        with _AddRemoveContext(repo) as c:
+            # extract addremove carefully -- this function can be called from a
+            # command that doesn't support addremove
+            if opts.get(b'addremove'):
+                relative = scmutil.anypats(pats, opts)
+                uipathfn = scmutil.getuipathfn(
+                    repo,
+                    legacyrelativevalue=relative,
                 )
-
-        return commitfunc(ui, repo, message, matcher, opts)
+                r = scmutil.addremove(
+                    repo,
+                    matcher,
+                    b"",
+                    uipathfn,
+                    opts,
+                    open_tr=c.open_transaction,
+                )
+                m = _(b"failed to mark all new/missing files as added/removed")
+                if r != 0:
+                    raise error.Abort(m)
+            c.close_dirstate_context()
+            return commitfunc(ui, repo, message, matcher, opts)
 
 
 def samefile(f, ctx1, ctx2):
@@ -2826,7 +2957,7 @@
         return f not in ctx2.manifest()
 
 
-def amend(ui, repo, old, extra, pats, opts):
+def amend(ui, repo, old, extra, pats, opts: Dict[str, Any]):
     # avoid cycle context -> subrepo -> cmdutil
     from . import context
 
@@ -2880,12 +3011,13 @@
         matcher = scmutil.match(wctx, pats, opts)
         relative = scmutil.anypats(pats, opts)
         uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-        if opts.get(b'addremove') and scmutil.addremove(
-            repo, matcher, b"", uipathfn, opts
-        ):
-            raise error.Abort(
-                _(b"failed to mark all new/missing files as added/removed")
-            )
+        if opts.get(b'addremove'):
+            with repo.dirstate.changing_files(repo):
+                if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
+                    m = _(
+                        b"failed to mark all new/missing files as added/removed"
+                    )
+                    raise error.Abort(m)
 
         # Check subrepos. This depends on in-place wctx._status update in
         # subrepo.precommit(). To minimize the risk of this hack, we do
@@ -3019,10 +3151,12 @@
         commitphase = None
         if opts.get(b'secret'):
             commitphase = phases.secret
+        elif opts.get(b'draft'):
+            commitphase = phases.draft
         newid = repo.commitctx(new)
         ms.reset()
 
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             # Reroute the working copy parent to the new changeset
             repo.setparents(newid, repo.nullid)
 
@@ -3285,7 +3419,7 @@
     names = {}
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
 
-    with repo.wlock():
+    with repo.wlock(), repo.dirstate.changing_files(repo):
         ## filling of the `names` mapping
         # walk dirstate to fill `names`
 
--- a/mercurial/commands.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/commands.py	Thu Mar 02 22:45:44 2023 +0100
@@ -13,6 +13,7 @@
 from .i18n import _
 from .node import (
     hex,
+    nullid,
     nullrev,
     short,
     wdirrev,
@@ -28,7 +29,6 @@
     copies,
     debugcommands as debugcommandsmod,
     destutil,
-    dirstateguard,
     discovery,
     encoding,
     error,
@@ -252,10 +252,11 @@
     Returns 0 if all files are successfully added.
     """
 
-    m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
-    return rejected and 1 or 0
+    with repo.wlock(), repo.dirstate.changing_files(repo):
+        m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
+        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+        rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
+        return rejected and 1 or 0
 
 
 @command(
@@ -330,10 +331,11 @@
     opts = pycompat.byteskwargs(opts)
     if not opts.get(b'similarity'):
         opts[b'similarity'] = b'100'
-    matcher = scmutil.match(repo[None], pats, opts)
-    relative = scmutil.anypats(pats, opts)
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
-    return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
+    with repo.wlock(), repo.dirstate.changing_files(repo):
+        matcher = scmutil.match(repo[None], pats, opts)
+        relative = scmutil.anypats(pats, opts)
+        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
+        return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
 
 
 @command(
@@ -822,7 +824,7 @@
     bheads = repo.branchheads(branch)
     rctx = scmutil.revsingle(repo, hex(parent))
     if not opts.get(b'merge') and op1 != node:
-        with dirstateguard.dirstateguard(repo, b'backout'):
+        with repo.transaction(b"backout"):
             overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
             with ui.configoverride(overrides, b'backout'):
                 stats = mergemod.back_out(ctx, parent=repo[parent])
@@ -835,7 +837,7 @@
             return 1
     else:
         hg.clean(repo, node, show_stats=False)
-        repo.dirstate.setbranch(branch)
+        repo.dirstate.setbranch(branch, repo.currenttransaction())
         cmdutil.revert(ui, repo, rctx)
 
     if opts.get(b'no_commit'):
@@ -1353,7 +1355,7 @@
     with repo.wlock():
         if opts.get(b'clean'):
             label = repo[b'.'].branch()
-            repo.dirstate.setbranch(label)
+            repo.dirstate.setbranch(label, repo.currenttransaction())
             ui.status(_(b'reset working directory to branch %s\n') % label)
         elif label:
 
@@ -1369,7 +1371,7 @@
                         hint=_(b"use 'hg update' to switch to it"),
                     )
 
-            repo.dirstate.setbranch(label)
+            repo.dirstate.setbranch(label, repo.currenttransaction())
             ui.status(_(b'marked working directory as branch %s\n') % label)
 
             # find any open named branches aside from default
@@ -1635,7 +1637,7 @@
         missing = set()
         excluded = set()
         for path in urlutil.get_push_paths(repo, ui, dests):
-            other = hg.peer(repo, opts, path.rawloc)
+            other = hg.peer(repo, opts, path)
             if revs is not None:
                 hex_revs = [repo[r].hex() for r in revs]
             else:
@@ -2008,6 +2010,7 @@
         (b'', b'close-branch', None, _(b'mark a branch head as closed')),
         (b'', b'amend', None, _(b'amend the parent of the working directory')),
         (b's', b'secret', None, _(b'use the secret phase for committing')),
+        (b'', b'draft', None, _(b'use the draft phase for committing')),
         (b'e', b'edit', None, _(b'invoke editor on commit messages')),
         (
             b'',
@@ -2082,6 +2085,8 @@
 
           hg commit --amend --date now
     """
+    cmdutil.check_at_most_one_arg(opts, 'draft', 'secret')
+    cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend'])
     with repo.wlock(), repo.lock():
         return _docommit(ui, repo, *pats, **opts)
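(`check_at_most_one_arg` now rejects `hg commit --draft --secret` up front, before any lock is taken. A minimal sketch of such a mutual-exclusion guard, hypothetical and not the actual cmdutil implementation:

    def check_at_most_one(opts, *names):
        # abort if more than one of the named options is set
        given = [n for n in names if opts.get(n)]
        if len(given) > 1:
            raise ValueError(
                'cannot specify both --%s and --%s' % tuple(given[:2])
            )

    check_at_most_one({'draft': True, 'secret': False}, 'draft', 'secret')  # ok
)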
 
@@ -2097,7 +2102,6 @@
         return 1 if ret == 0 else ret
 
     if opts.get('subrepos'):
-        cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend'])
         # Let --subrepos on the command line override config setting.
         ui.setconfig(b'ui', b'commitsubrepos', True, b'commit')
 
@@ -2174,6 +2178,8 @@
             overrides = {}
             if opts.get(b'secret'):
                 overrides[(b'phases', b'new-commit')] = b'secret'
+            elif opts.get(b'draft'):
+                overrides[(b'phases', b'new-commit')] = b'draft'
 
             baseui = repo.baseui
             with baseui.configoverride(overrides, b'commit'):
@@ -2491,7 +2497,19 @@
     Returns 0 on success, 1 if errors are encountered.
     """
     opts = pycompat.byteskwargs(opts)
-    with repo.wlock():
+
+    context = repo.dirstate.changing_files
+    rev = opts.get(b'at_rev')
+    ctx = None
+    if rev:
+        ctx = logcmdutil.revsingle(repo, rev)
+        if ctx.rev() is not None:
+
+            def context(repo):
+                return util.nullcontextmanager()
+
+            opts[b'at_rev'] = ctx.rev()
+    with repo.wlock(), context(repo):
         return cmdutil.copy(ui, repo, pats, opts)
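(Here, and in the matching `hg rename` hunk further below, the dirstate context is chosen up front: `changing_files` by default, but a null context when `--at-rev` targets a committed revision, since the working copy is then left untouched. A reduced sketch of the pattern, with a fake dirstate standing in for the real one:

    import contextlib

    class FakeDirstate:
        @contextlib.contextmanager
        def changing_files(self, repo):
            print('dirstate context opened')
            yield
            print('dirstate context closed')

    def copy_like(dirstate, at_rev):
        context = dirstate.changing_files
        if at_rev is not None:
            # rewriting a committed revision: nothing to wrap
            def context(repo):
                return contextlib.nullcontext()
        with context(None):
            pass  # ... perform the copy/rename ...

    copy_like(FakeDirstate(), at_rev=None)  # opens the dirstate context
    copy_like(FakeDirstate(), at_rev=42)    # runs under the null context
)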
 
 
@@ -2960,19 +2978,20 @@
     if not pats:
         raise error.InputError(_(b'no files specified'))
 
-    m = scmutil.match(repo[None], pats, opts)
-    dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    rejected = cmdutil.forget(
-        ui,
-        repo,
-        m,
-        prefix=b"",
-        uipathfn=uipathfn,
-        explicitonly=False,
-        dryrun=dryrun,
-        interactive=interactive,
-    )[0]
+    with repo.wlock(), repo.dirstate.changing_files(repo):
+        m = scmutil.match(repo[None], pats, opts)
+        dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
+        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+        rejected = cmdutil.forget(
+            ui,
+            repo,
+            m,
+            prefix=b"",
+            uipathfn=uipathfn,
+            explicitonly=False,
+            dryrun=dryrun,
+            interactive=interactive,
+        )[0]
     return rejected and 1 or 0
 
 
@@ -3911,12 +3930,11 @@
     peer = None
     try:
         if source:
-            source, branches = urlutil.get_unique_pull_path(
-                b'identify', repo, ui, source
-            )
+            path = urlutil.get_unique_pull_path_obj(b'identify', ui, source)
             # only pass ui when no repo
-            peer = hg.peer(repo or ui, opts, source)
+            peer = hg.peer(repo or ui, opts, path)
             repo = peer.local()
+            branches = (path.branch, [])
             revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
 
         fm = ui.formatter(b'identify', opts)
@@ -4229,12 +4247,10 @@
         if not opts.get(b'no_commit'):
             lock = repo.lock
             tr = lambda: repo.transaction(b'import')
-            dsguard = util.nullcontextmanager
         else:
             lock = util.nullcontextmanager
             tr = util.nullcontextmanager
-            dsguard = lambda: dirstateguard.dirstateguard(repo, b'import')
-        with lock(), tr(), dsguard():
+        with lock(), tr():
             parents = repo[None].parents()
             for patchurl in patches:
                 if patchurl == b'-':
@@ -4383,17 +4399,15 @@
     if opts.get(b'bookmarks'):
         srcs = urlutil.get_pull_paths(repo, ui, [source])
         for path in srcs:
-            source, branches = urlutil.parseurl(
-                path.rawloc, opts.get(b'branch')
-            )
-            other = hg.peer(repo, opts, source)
+            # XXX the "branches" option is not used. Should it be?
+            other = hg.peer(repo, opts, path)
             try:
                 if b'bookmarks' not in other.listkeys(b'namespaces'):
                     ui.warn(_(b"remote doesn't support bookmarks\n"))
                     return 0
                 ui.pager(b'incoming')
                 ui.status(
-                    _(b'comparing with %s\n') % urlutil.hidepassword(source)
+                    _(b'comparing with %s\n') % urlutil.hidepassword(path.loc)
                 )
                 return bookmarks.incoming(
                     ui, repo, other, mode=path.bookmarks_mode
@@ -4426,7 +4440,7 @@
     Returns 0 on success.
     """
     opts = pycompat.byteskwargs(opts)
-    path = urlutil.get_clone_path(ui, dest)[1]
+    path = urlutil.get_clone_path_obj(ui, dest)
     peer = hg.peer(ui, opts, path, create=True)
     peer.close()
 
@@ -5038,14 +5052,13 @@
     opts = pycompat.byteskwargs(opts)
     if opts.get(b'bookmarks'):
         for path in urlutil.get_push_paths(repo, ui, dests):
-            dest = path.pushloc or path.loc
-            other = hg.peer(repo, opts, dest)
+            other = hg.peer(repo, opts, path)
             try:
                 if b'bookmarks' not in other.listkeys(b'namespaces'):
                     ui.warn(_(b"remote doesn't support bookmarks\n"))
                     return 0
                 ui.status(
-                    _(b'comparing with %s\n') % urlutil.hidepassword(dest)
+                    _(b'comparing with %s\n') % urlutil.hidepassword(path.loc)
                 )
                 ui.pager(b'outgoing')
                 return bookmarks.outgoing(ui, repo, other)
@@ -5434,12 +5447,12 @@
         raise error.InputError(msg, hint=hint)
 
     for path in urlutil.get_pull_paths(repo, ui, sources):
-        source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
-        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(source))
+        ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
         ui.flush()
-        other = hg.peer(repo, opts, source)
+        other = hg.peer(repo, opts, path)
         update_conflict = None
         try:
+            branches = (path.branch, opts.get(b'branch', []))
             revs, checkout = hg.addbranchrevs(
                 repo, other, branches, opts.get(b'rev')
             )
@@ -5515,8 +5528,12 @@
                     elif opts.get(b'branch'):
                         brev = opts[b'branch'][0]
                     else:
-                        brev = branches[0]
-                repo._subtoppath = source
+                        brev = path.branch
+
+                # XXX path: we are losing the `path` object here. Keeping it
+                # would be valuable, for example as a "variant", as we do
+                # for pushes.
+                repo._subtoppath = path.loc
                 try:
                     update_conflict = postincoming(
                         ui, repo, modheads, opts.get(b'update'), checkout, brev
@@ -5766,7 +5783,7 @@
     some_pushed = False
     result = 0
     for path in urlutil.get_push_paths(repo, ui, dests):
-        dest = path.pushloc or path.loc
+        dest = path.loc
         branches = (path.branch, opts.get(b'branch') or [])
         ui.status(_(b'pushing to %s\n') % urlutil.hidepassword(dest))
         revs, checkout = hg.addbranchrevs(
@@ -5940,12 +5957,13 @@
     if not pats and not after:
         raise error.InputError(_(b'no files specified'))
 
-    m = scmutil.match(repo[None], pats, opts)
-    subrepos = opts.get(b'subrepos')
-    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
-    return cmdutil.remove(
-        ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun
-    )
+    with repo.wlock(), repo.dirstate.changing_files(repo):
+        m = scmutil.match(repo[None], pats, opts)
+        subrepos = opts.get(b'subrepos')
+        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
+        return cmdutil.remove(
+            ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun
+        )
 
 
 @command(
@@ -5994,7 +6012,18 @@
     Returns 0 on success, 1 if errors are encountered.
     """
     opts = pycompat.byteskwargs(opts)
-    with repo.wlock():
+    context = repo.dirstate.changing_files
+    rev = opts.get(b'at_rev')
+    ctx = None
+    if rev:
+        ctx = logcmdutil.revsingle(repo, rev)
+        if ctx.rev() is not None:
+
+            def context(repo):
+                return util.nullcontextmanager()
+
+            opts[b'at_rev'] = ctx.rev()
+    with repo.wlock(), context(repo):
         return cmdutil.copy(ui, repo, pats, opts, rename=True)
 
 
@@ -6260,7 +6289,7 @@
         #
         # All this should eventually happens, but in the mean time, we use this
         # context manager slightly out of the context it should be.
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
 
         if not didwork and pats:
@@ -7252,23 +7281,22 @@
         # XXX We should actually skip this if no default is specified, instead
         # of passing "default" which will resolve as "./default/" if no default
         # path is defined.
-        source, branches = urlutil.get_unique_pull_path(
-            b'summary', repo, ui, b'default'
-        )
-        sbranch = branches[0]
+        path = urlutil.get_unique_pull_path_obj(b'summary', ui, b'default')
+        sbranch = path.branch
         try:
-            other = hg.peer(repo, {}, source)
+            other = hg.peer(repo, {}, path)
         except error.RepoError:
             if opts.get(b'remote'):
                 raise
-            return source, sbranch, None, None, None
+            return path.loc, sbranch, None, None, None
+        branches = (path.branch, [])
         revs, checkout = hg.addbranchrevs(repo, other, branches, None)
         if revs:
             revs = [other.lookup(rev) for rev in revs]
-        ui.debug(b'comparing with %s\n' % urlutil.hidepassword(source))
+        ui.debug(b'comparing with %s\n' % urlutil.hidepassword(path.loc))
         with repo.ui.silent():
             commoninc = discovery.findcommonincoming(repo, other, heads=revs)
-        return source, sbranch, other, commoninc, commoninc[1]
+        return path.loc, sbranch, other, commoninc, commoninc[1]
 
     if needsincoming:
         source, sbranch, sother, commoninc, incoming = getincoming()
@@ -7284,9 +7312,10 @@
             d = b'default-push'
         elif b'default' in ui.paths:
             d = b'default'
+        path = None
         if d is not None:
             path = urlutil.get_unique_push_path(b'summary', repo, ui, d)
-            dest = path.pushloc or path.loc
+            dest = path.loc
             dbranch = path.branch
         else:
             dest = b'default'
@@ -7294,7 +7323,7 @@
         revs, checkout = hg.addbranchrevs(repo, repo, (dbranch, []), None)
         if source != dest:
             try:
-                dother = hg.peer(repo, {}, dest)
+                dother = hg.peer(repo, {}, path if path is not None else dest)
             except error.RepoError:
                 if opts.get(b'remote'):
                     raise
@@ -7472,8 +7501,11 @@
                 )
         node = logcmdutil.revsingle(repo, rev_).node()
 
+        # don't allow tagging the null rev or the working directory
         if node is None:
             raise error.InputError(_(b"cannot tag working directory"))
+        elif not opts.get(b'remove') and node == nullid:
+            raise error.InputError(_(b"cannot tag null revision"))
 
         if not message:
             # we don't translate commit messages
@@ -7494,13 +7526,6 @@
             editform=editform, **pycompat.strkwargs(opts)
         )
 
-        # don't allow tagging the null rev
-        if (
-            not opts.get(b'remove')
-            and logcmdutil.revsingle(repo, rev_).rev() == nullrev
-        ):
-            raise error.InputError(_(b"cannot tag null revision"))
-
         tagsmod.tag(
             repo,
             names,
--- a/mercurial/configitems.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/configitems.py	Thu Mar 02 22:45:44 2023 +0100
@@ -588,6 +588,18 @@
     b'revlog.debug-delta',
     default=False,
 )
+# display extra information about the bundling process
+coreconfigitem(
+    b'debug',
+    b'bundling-stats',
+    default=False,
+)
+# display extra information about the unbundling process
+coreconfigitem(
+    b'debug',
+    b'unbundling-stats',
+    default=False,
+)
 coreconfigitem(
     b'defaults',
     b'.*',
@@ -779,6 +791,14 @@
     b'discovery.exchange-heads',
     default=True,
 )
+# If devel.debug.abort-update is True, then any merge with the working copy,
+# e.g. [hg update], will be aborted after figuring out what needs to be done,
+# but before spawning the parallel worker
+coreconfigitem(
+    b'devel',
+    b'debug.abort-update',
+    default=False,
+)
 # If discovery.grow-sample is False, the sample size used in set discovery will
 # not be increased through the process
 coreconfigitem(
@@ -956,6 +976,13 @@
     b'changegroup4',
     default=False,
 )
+
+# might remove rank configuration once the computation has no impact
+coreconfigitem(
+    b'experimental',
+    b'changelog-v2.compute-rank',
+    default=True,
+)
 coreconfigitem(
     b'experimental',
     b'cleanup-as-archived',
@@ -1819,6 +1846,13 @@
 )
 coreconfigitem(
     b'merge-tools',
+    br'.*\.regappend$',
+    default=b"",
+    generic=True,
+    priority=-1,
+)
+coreconfigitem(
+    b'merge-tools',
     br'.*\.symlink$',
     default=False,
     generic=True,
@@ -1862,7 +1896,7 @@
 )
 coreconfigitem(
     b'paths',
-    b'.*',
+    b'[^:]*',
     default=None,
     generic=True,
 )
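(Narrowing the generic `paths` pattern from `.*` to `[^:]*` keeps plain path names matching while leaving `name:sub-option` keys, such as the `:pulled-delta-reuse-policy` item added just below, to their own more specific entries. Assuming anchored matching, the difference looks like:

    import re

    assert re.fullmatch(br'[^:]*', b'default')
    assert not re.fullmatch(br'[^:]*', b'default:pulled-delta-reuse-policy')
    assert re.fullmatch(
        br'.*:pulled-delta-reuse-policy',
        b'default:pulled-delta-reuse-policy',
    )
)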
@@ -1891,6 +1925,12 @@
     generic=True,
 )
 coreconfigitem(
+    b'paths',
+    b'.*:pulled-delta-reuse-policy',
+    default=None,
+    generic=True,
+)
+coreconfigitem(
     b'phases',
     b'checksubrepos',
     default=b'follow',
@@ -2068,6 +2108,11 @@
 )
 coreconfigitem(
     b'storage',
+    b'revlog.delta-parent-search.candidate-group-chunk-size',
+    default=20,
+)
+coreconfigitem(
+    b'storage',
     b'revlog.issue6528.fix-incoming',
     default=True,
 )
@@ -2089,6 +2134,7 @@
     b'revlog.reuse-external-delta',
     default=True,
 )
+# This option is True unless `format.generaldelta` is set.
 coreconfigitem(
     b'storage',
     b'revlog.reuse-external-delta-parent',
@@ -2168,7 +2214,7 @@
 coreconfigitem(
     b'server',
     b'pullbundle',
-    default=False,
+    default=True,
 )
 coreconfigitem(
     b'server',
--- a/mercurial/context.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/context.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1596,7 +1596,7 @@
         if p2node is None:
             p2node = self._repo.nodeconstants.nullid
         dirstate = self._repo.dirstate
-        with dirstate.parentchange():
+        with dirstate.changing_parents(self._repo):
             copies = dirstate.setparents(p1node, p2node)
             pctx = self._repo[p1node]
             if copies:
@@ -1855,47 +1855,43 @@
 
     def _poststatusfixup(self, status, fixup):
         """update dirstate for files that are actually clean"""
-        ui = self._repo.ui
         testing.wait_on_cfg(self._repo.ui, b'status.pre-dirstate-write-file')
+        dirstate = self._repo.dirstate
         poststatus = self._repo.postdsstatus()
-        if fixup or poststatus or self._repo.dirstate._dirty:
+        if fixup:
+            if dirstate.is_changing_parents:
+                normal = lambda f, pfd: dirstate.update_file(
+                    f,
+                    p1_tracked=True,
+                    wc_tracked=True,
+                )
+            else:
+                normal = dirstate.set_clean
+            for f, pdf in fixup:
+                normal(f, pdf)
+        if poststatus or self._repo.dirstate._dirty:
             try:
-                oldid = self._repo.dirstate.identity()
-
                 # updating the dirstate is optional
                 # so we don't wait on the lock
                 # wlock can invalidate the dirstate, so cache normal _after_
                 # taking the lock
+                pre_dirty = dirstate._dirty
                 with self._repo.wlock(False):
-                    dirstate = self._repo.dirstate
-                    if dirstate.identity() == oldid:
-                        if fixup:
-                            if dirstate.pendingparentchange():
-                                normal = lambda f, pfd: dirstate.update_file(
-                                    f, p1_tracked=True, wc_tracked=True
-                                )
-                            else:
-                                normal = dirstate.set_clean
-                            for f, pdf in fixup:
-                                normal(f, pdf)
-                            # write changes out explicitly, because nesting
-                            # wlock at runtime may prevent 'wlock.release()'
-                            # after this block from doing so for subsequent
-                            # changing files
-                            tr = self._repo.currenttransaction()
-                            self._repo.dirstate.write(tr)
-
-                        if poststatus:
-                            for ps in poststatus:
-                                ps(self, status)
-                    else:
-                        # in this case, writing changes out breaks
-                        # consistency, because .hg/dirstate was
-                        # already changed simultaneously after last
-                        # caching (see also issue5584 for detail)
-                        ui.debug(b'skip updating dirstate: identity mismatch\n')
+                    assert self._repo.dirstate is dirstate
+                    post_dirty = dirstate._dirty
+                    if post_dirty:
+                        tr = self._repo.currenttransaction()
+                        dirstate.write(tr)
+                    elif pre_dirty:
+                        # grabbing the wlock detected that the dirstate
+                        # changes needed to be dropped
+                        m = b'skip updating dirstate: identity mismatch\n'
+                        self._repo.ui.debug(m)
+                    if poststatus:
+                        for ps in poststatus:
+                            ps(self, status)
             except error.LockError:
-                pass
+                dirstate.invalidate()
             finally:
                 # Even if the wlock couldn't be grabbed, clear out the list.
                 self._repo.clearpostdsstatus()
@@ -1905,25 +1901,27 @@
         subrepos = []
         if b'.hgsub' in self:
             subrepos = sorted(self.substate)
-        cmp, s, mtime_boundary = self._repo.dirstate.status(
-            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
-        )
-
-        # check for any possibly clean files
-        fixup = []
-        if cmp:
-            modified2, deleted2, clean_set, fixup = self._checklookup(
-                cmp, mtime_boundary
+        dirstate = self._repo.dirstate
+        with dirstate.running_status(self._repo):
+            cmp, s, mtime_boundary = dirstate.status(
+                match, subrepos, ignored=ignored, clean=clean, unknown=unknown
             )
-            s.modified.extend(modified2)
-            s.deleted.extend(deleted2)
-
-            if clean_set and clean:
-                s.clean.extend(clean_set)
-            if fixup and clean:
-                s.clean.extend((f for f, _ in fixup))
-
-        self._poststatusfixup(s, fixup)
+
+            # check for any possibly clean files
+            fixup = []
+            if cmp:
+                modified2, deleted2, clean_set, fixup = self._checklookup(
+                    cmp, mtime_boundary
+                )
+                s.modified.extend(modified2)
+                s.deleted.extend(deleted2)
+
+                if clean_set and clean:
+                    s.clean.extend(clean_set)
+                if fixup and clean:
+                    s.clean.extend((f for f, _ in fixup))
+
+            self._poststatusfixup(s, fixup)
 
         if match.always():
             # cache for performance
@@ -2051,7 +2049,7 @@
         return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
 
     def markcommitted(self, node):
-        with self._repo.dirstate.parentchange():
+        with self._repo.dirstate.changing_parents(self._repo):
             for f in self.modified() + self.added():
                 self._repo.dirstate.update_file(
                     f, p1_tracked=True, wc_tracked=True
--- a/mercurial/debugcommands.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/debugcommands.py	Thu Mar 02 22:45:44 2023 +0100
@@ -21,7 +21,6 @@
 import socket
 import ssl
 import stat
-import string
 import subprocess
 import sys
 import time
@@ -73,7 +72,6 @@
     repoview,
     requirements,
     revlog,
-    revlogutils,
     revset,
     revsetlang,
     scmutil,
@@ -89,6 +87,7 @@
     upgrade,
     url as urlmod,
     util,
+    verify,
     vfs as vfsmod,
     wireprotoframing,
     wireprotoserver,
@@ -556,15 +555,9 @@
 @command(b'debugcheckstate', [], b'')
 def debugcheckstate(ui, repo):
     """validate the correctness of the current dirstate"""
-    parent1, parent2 = repo.dirstate.parents()
-    m1 = repo[parent1].manifest()
-    m2 = repo[parent2].manifest()
-    errors = 0
-    for err in repo.dirstate.verify(m1, m2):
-        ui.warn(err[0] % err[1:])
-        errors += 1
+    errors = verify.verifier(repo)._verify_dirstate()
     if errors:
-        errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
+        errstr = _(b"dirstate inconsistent with current parent's manifest")
         raise error.Abort(errstr)
 
 
@@ -990,17 +983,29 @@
 
 @command(
     b'debug-delta-find',
-    cmdutil.debugrevlogopts + cmdutil.formatteropts,
+    cmdutil.debugrevlogopts
+    + cmdutil.formatteropts
+    + [
+        (
+            b'',
+            b'source',
+            b'full',
+            _(b'input data fed to the process (full, storage, p1, p2, prev)'),
+        ),
+    ],
     _(b'-c|-m|FILE REV'),
     optionalrepo=True,
 )
-def debugdeltafind(ui, repo, arg_1, arg_2=None, **opts):
+def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
     """display the computation to get to a valid delta for storing REV
 
     This command will replay the process used to find the "best" delta to store
     a revision and display information about all the steps used to get to that
     result.
 
+    By default, the process is fed the full text of the revision. This
+    can be controlled with the --source flag.
+
     The revision use the revision number of the target storage (not changelog
     revision number).
 
@@ -1017,34 +1022,22 @@
     rev = int(rev)
 
     revlog = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
-
-    deltacomputer = deltautil.deltacomputer(
-        revlog,
-        write_debug=ui.write,
-        debug_search=not ui.quiet,
-    )
-
-    node = revlog.node(rev)
     p1r, p2r = revlog.parentrevs(rev)
-    p1 = revlog.node(p1r)
-    p2 = revlog.node(p2r)
-    btext = [revlog.revision(rev)]
-    textlen = len(btext[0])
-    cachedelta = None
-    flags = revlog.flags(rev)
-
-    revinfo = revlogutils.revisioninfo(
-        node,
-        p1,
-        p2,
-        btext,
-        textlen,
-        cachedelta,
-        flags,
-    )
-
-    fh = revlog._datafp()
-    deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
+
+    if source == b'full':
+        base_rev = nullrev
+    elif source == b'storage':
+        base_rev = revlog.deltaparent(rev)
+    elif source == b'p1':
+        base_rev = p1r
+    elif source == b'p2':
+        base_rev = p2r
+    elif source == b'prev':
+        base_rev = rev - 1
+    else:
+        raise error.InputError(b"invalid --source value: %s" % source)
+
+    revlog_debug.debug_delta_find(ui, revlog, rev, base_rev=base_rev)
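(The `--source` flag simply selects the delta base handed to `revlog_debug.debug_delta_find`, e.g. `hg debug-delta-find -m 1234 --source p1`; the chain of `elif`s is equivalent to a small lookup once the parent-dependent entries are resolved. An illustrative dictionary-based equivalent:

    nullrev = -1  # Mercurial's sentinel for "no parent"

    def resolve_base(source, rev, p1r, p2r, stored):
        table = {
            b'full': nullrev,    # recompute from the full text
            b'storage': stored,  # the delta parent currently stored
            b'p1': p1r,
            b'p2': p2r,
            b'prev': rev - 1,
        }
        if source not in table:
            raise ValueError('invalid --source value: %r' % source)
        return table[source]

    assert resolve_base(b'prev', 10, 8, nullrev, 9) == 9
)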
 
 
 @command(
@@ -1236,12 +1229,12 @@
     random.seed(int(opts[b'seed']))
 
     if not remote_revs:
-
-        remoteurl, branches = urlutil.get_unique_pull_path(
-            b'debugdiscovery', repo, ui, remoteurl
+        path = urlutil.get_unique_pull_path_obj(
+            b'debugdiscovery', ui, remoteurl
         )
-        remote = hg.peer(repo, opts, remoteurl)
-        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(remoteurl))
+        branches = (path.branch, [])
+        remote = hg.peer(repo, opts, path)
+        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
     else:
         branches = (None, [])
         remote_filtered_revs = logcmdutil.revrange(
@@ -3135,6 +3128,9 @@
     """
     ctx = scmutil.revsingle(repo, rev)
     with repo.wlock():
+        if repo.currenttransaction() is not None:
+            msg = b'rebuild the dirstate outside of a transaction'
+            raise error.ProgrammingError(msg)
         dirstate = repo.dirstate
         changedfiles = None
         # See command doc for what minimal does.
@@ -3146,7 +3142,8 @@
             dsnotadded = {f for f in dsonly if not dirstate.get_entry(f).added}
             changedfiles = manifestonly | dsnotadded
 
-        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
+        with dirstate.changing_parents(repo):
+            dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
 
 
 @command(
@@ -3207,348 +3204,10 @@
     r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
 
     if opts.get(b"dump"):
-        numrevs = len(r)
-        ui.write(
-            (
-                b"# rev p1rev p2rev start   end deltastart base   p1   p2"
-                b" rawsize totalsize compression heads chainlen\n"
-            )
-        )
-        ts = 0
-        heads = set()
-
-        for rev in range(numrevs):
-            dbase = r.deltaparent(rev)
-            if dbase == -1:
-                dbase = rev
-            cbase = r.chainbase(rev)
-            clen = r.chainlen(rev)
-            p1, p2 = r.parentrevs(rev)
-            rs = r.rawsize(rev)
-            ts = ts + rs
-            heads -= set(r.parentrevs(rev))
-            heads.add(rev)
-            try:
-                compression = ts / r.end(rev)
-            except ZeroDivisionError:
-                compression = 0
-            ui.write(
-                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
-                b"%11d %5d %8d\n"
-                % (
-                    rev,
-                    p1,
-                    p2,
-                    r.start(rev),
-                    r.end(rev),
-                    r.start(dbase),
-                    r.start(cbase),
-                    r.start(p1),
-                    r.start(p2),
-                    rs,
-                    ts,
-                    compression,
-                    len(heads),
-                    clen,
-                )
-            )
-        return 0
-
-    format = r._format_version
-    v = r._format_flags
-    flags = []
-    gdelta = False
-    if v & revlog.FLAG_INLINE_DATA:
-        flags.append(b'inline')
-    if v & revlog.FLAG_GENERALDELTA:
-        gdelta = True
-        flags.append(b'generaldelta')
-    if not flags:
-        flags = [b'(none)']
-
-    ### tracks merge vs single parent
-    nummerges = 0
-
-    ### tracks ways the "delta" are build
-    # nodelta
-    numempty = 0
-    numemptytext = 0
-    numemptydelta = 0
-    # full file content
-    numfull = 0
-    # intermediate snapshot against a prior snapshot
-    numsemi = 0
-    # snapshot count per depth
-    numsnapdepth = collections.defaultdict(lambda: 0)
-    # delta against previous revision
-    numprev = 0
-    # delta against first or second parent (not prev)
-    nump1 = 0
-    nump2 = 0
-    # delta against neither prev nor parents
-    numother = 0
-    # delta against prev that are also first or second parent
-    # (details of `numprev`)
-    nump1prev = 0
-    nump2prev = 0
-
-    # data about delta chain of each revs
-    chainlengths = []
-    chainbases = []
-    chainspans = []
-
-    # data about each revision
-    datasize = [None, 0, 0]
-    fullsize = [None, 0, 0]
-    semisize = [None, 0, 0]
-    # snapshot count per depth
-    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
-    deltasize = [None, 0, 0]
-    chunktypecounts = {}
-    chunktypesizes = {}
-
-    def addsize(size, l):
-        if l[0] is None or size < l[0]:
-            l[0] = size
-        if size > l[1]:
-            l[1] = size
-        l[2] += size
-
-    numrevs = len(r)
-    for rev in range(numrevs):
-        p1, p2 = r.parentrevs(rev)
-        delta = r.deltaparent(rev)
-        if format > 0:
-            addsize(r.rawsize(rev), datasize)
-        if p2 != nullrev:
-            nummerges += 1
-        size = r.length(rev)
-        if delta == nullrev:
-            chainlengths.append(0)
-            chainbases.append(r.start(rev))
-            chainspans.append(size)
-            if size == 0:
-                numempty += 1
-                numemptytext += 1
-            else:
-                numfull += 1
-                numsnapdepth[0] += 1
-                addsize(size, fullsize)
-                addsize(size, snapsizedepth[0])
-        else:
-            chainlengths.append(chainlengths[delta] + 1)
-            baseaddr = chainbases[delta]
-            revaddr = r.start(rev)
-            chainbases.append(baseaddr)
-            chainspans.append((revaddr - baseaddr) + size)
-            if size == 0:
-                numempty += 1
-                numemptydelta += 1
-            elif r.issnapshot(rev):
-                addsize(size, semisize)
-                numsemi += 1
-                depth = r.snapshotdepth(rev)
-                numsnapdepth[depth] += 1
-                addsize(size, snapsizedepth[depth])
-            else:
-                addsize(size, deltasize)
-                if delta == rev - 1:
-                    numprev += 1
-                    if delta == p1:
-                        nump1prev += 1
-                    elif delta == p2:
-                        nump2prev += 1
-                elif delta == p1:
-                    nump1 += 1
-                elif delta == p2:
-                    nump2 += 1
-                elif delta != nullrev:
-                    numother += 1
-
-        # Obtain data on the raw chunks in the revlog.
-        if util.safehasattr(r, b'_getsegmentforrevs'):
-            segment = r._getsegmentforrevs(rev, rev)[1]
-        else:
-            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
-        if segment:
-            chunktype = bytes(segment[0:1])
-        else:
-            chunktype = b'empty'
-
-        if chunktype not in chunktypecounts:
-            chunktypecounts[chunktype] = 0
-            chunktypesizes[chunktype] = 0
-
-        chunktypecounts[chunktype] += 1
-        chunktypesizes[chunktype] += size
-
-    # Adjust size min value for empty cases
-    for size in (datasize, fullsize, semisize, deltasize):
-        if size[0] is None:
-            size[0] = 0
-
-    numdeltas = numrevs - numfull - numempty - numsemi
-    numoprev = numprev - nump1prev - nump2prev
-    totalrawsize = datasize[2]
-    datasize[2] /= numrevs
-    fulltotal = fullsize[2]
-    if numfull == 0:
-        fullsize[2] = 0
+        revlog_debug.dump(ui, r)
     else:
-        fullsize[2] /= numfull
-    semitotal = semisize[2]
-    snaptotal = {}
-    if numsemi > 0:
-        semisize[2] /= numsemi
-    for depth in snapsizedepth:
-        snaptotal[depth] = snapsizedepth[depth][2]
-        snapsizedepth[depth][2] /= numsnapdepth[depth]
-
-    deltatotal = deltasize[2]
-    if numdeltas > 0:
-        deltasize[2] /= numdeltas
-    totalsize = fulltotal + semitotal + deltatotal
-    avgchainlen = sum(chainlengths) / numrevs
-    maxchainlen = max(chainlengths)
-    maxchainspan = max(chainspans)
-    compratio = 1
-    if totalsize:
-        compratio = totalrawsize / totalsize
-
-    basedfmtstr = b'%%%dd\n'
-    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
-
-    def dfmtstr(max):
-        return basedfmtstr % len(str(max))
-
-    def pcfmtstr(max, padding=0):
-        return basepcfmtstr % (len(str(max)), b' ' * padding)
-
-    def pcfmt(value, total):
-        if total:
-            return (value, 100 * float(value) / total)
-        else:
-            return value, 100.0
-
-    ui.writenoi18n(b'format : %d\n' % format)
-    ui.writenoi18n(b'flags  : %s\n' % b', '.join(flags))
-
-    ui.write(b'\n')
-    fmt = pcfmtstr(totalsize)
-    fmt2 = dfmtstr(totalsize)
-    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
-    ui.writenoi18n(b'    merges    : ' + fmt % pcfmt(nummerges, numrevs))
-    ui.writenoi18n(
-        b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
-    )
-    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
-    ui.writenoi18n(b'    empty     : ' + fmt % pcfmt(numempty, numrevs))
-    ui.writenoi18n(
-        b'                   text  : '
-        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
-    )
-    ui.writenoi18n(
-        b'                   delta : '
-        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
-    )
-    ui.writenoi18n(
-        b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs)
-    )
-    for depth in sorted(numsnapdepth):
-        ui.write(
-            (b'      lvl-%-3d :       ' % depth)
-            + fmt % pcfmt(numsnapdepth[depth], numrevs)
-        )
-    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
-    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
-    ui.writenoi18n(
-        b'    snapshot  : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
-    )
-    for depth in sorted(numsnapdepth):
-        ui.write(
-            (b'      lvl-%-3d :       ' % depth)
-            + fmt % pcfmt(snaptotal[depth], totalsize)
-        )
-    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))
-
-    def fmtchunktype(chunktype):
-        if chunktype == b'empty':
-            return b'    %s     : ' % chunktype
-        elif chunktype in pycompat.bytestr(string.ascii_letters):
-            return b'    0x%s (%s)  : ' % (hex(chunktype), chunktype)
-        else:
-            return b'    0x%s      : ' % hex(chunktype)
-
-    ui.write(b'\n')
-    ui.writenoi18n(b'chunks        : ' + fmt2 % numrevs)
-    for chunktype in sorted(chunktypecounts):
-        ui.write(fmtchunktype(chunktype))
-        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
-    ui.writenoi18n(b'chunks size   : ' + fmt2 % totalsize)
-    for chunktype in sorted(chunktypecounts):
-        ui.write(fmtchunktype(chunktype))
-        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
-
-    ui.write(b'\n')
-    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
-    ui.writenoi18n(b'avg chain length  : ' + fmt % avgchainlen)
-    ui.writenoi18n(b'max chain length  : ' + fmt % maxchainlen)
-    ui.writenoi18n(b'max chain reach   : ' + fmt % maxchainspan)
-    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
-
-    if format > 0:
-        ui.write(b'\n')
-        ui.writenoi18n(
-            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
-            % tuple(datasize)
-        )
-    ui.writenoi18n(
-        b'full revision size (min/max/avg)     : %d / %d / %d\n'
-        % tuple(fullsize)
-    )
-    ui.writenoi18n(
-        b'inter-snapshot size (min/max/avg)    : %d / %d / %d\n'
-        % tuple(semisize)
-    )
-    for depth in sorted(snapsizedepth):
-        if depth == 0:
-            continue
-        ui.writenoi18n(
-            b'    level-%-3d (min/max/avg)          : %d / %d / %d\n'
-            % ((depth,) + tuple(snapsizedepth[depth]))
-        )
-    ui.writenoi18n(
-        b'delta size (min/max/avg)             : %d / %d / %d\n'
-        % tuple(deltasize)
-    )
-
-    if numdeltas > 0:
-        ui.write(b'\n')
-        fmt = pcfmtstr(numdeltas)
-        fmt2 = pcfmtstr(numdeltas, 4)
-        ui.writenoi18n(
-            b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas)
-        )
-        if numprev > 0:
-            ui.writenoi18n(
-                b'    where prev = p1  : ' + fmt2 % pcfmt(nump1prev, numprev)
-            )
-            ui.writenoi18n(
-                b'    where prev = p2  : ' + fmt2 % pcfmt(nump2prev, numprev)
-            )
-            ui.writenoi18n(
-                b'    other            : ' + fmt2 % pcfmt(numoprev, numprev)
-            )
-        if gdelta:
-            ui.writenoi18n(
-                b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas)
-            )
-            ui.writenoi18n(
-                b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas)
-            )
-            ui.writenoi18n(
-                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
-            )
+        revlog_debug.debug_revlog(ui, r)
+    return 0
 
 
 @command(
@@ -3935,10 +3594,8 @@
             )
         source = b"default"
 
-    source, branches = urlutil.get_unique_pull_path(
-        b'debugssl', repo, ui, source
-    )
-    url = urlutil.url(source)
+    path = urlutil.get_unique_pull_path_obj(b'debugssl', ui, source)
+    url = path.url
 
     defaultport = {b'https': 443, b'ssh': 22}
     if url.scheme in defaultport:
@@ -4049,20 +3706,19 @@
     for backup in backups:
         # Much of this is copied from the hg incoming logic
         source = os.path.relpath(backup, encoding.getcwd())
-        source, branches = urlutil.get_unique_pull_path(
+        path = urlutil.get_unique_pull_path_obj(
             b'debugbackupbundle',
-            repo,
             ui,
             source,
-            default_branches=opts.get(b'branch'),
         )
         try:
-            other = hg.peer(repo, opts, source)
+            other = hg.peer(repo, opts, path)
         except error.LookupError as ex:
-            msg = _(b"\nwarning: unable to open bundle %s") % source
+            msg = _(b"\nwarning: unable to open bundle %s") % path.loc
             hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
             ui.warn(msg, hint=hint)
             continue
+        branches = (path.branch, opts.get(b'branch', []))
         revs, checkout = hg.addbranchrevs(
             repo, other, branches, opts.get(b"rev")
         )
@@ -4085,29 +3741,29 @@
                 with repo.lock(), repo.transaction(b"unbundle") as tr:
                     if scmutil.isrevsymbol(other, recovernode):
                         ui.status(_(b"Unbundling %s\n") % (recovernode))
-                        f = hg.openpath(ui, source)
-                        gen = exchange.readbundle(ui, f, source)
+                        f = hg.openpath(ui, path.loc)
+                        gen = exchange.readbundle(ui, f, path.loc)
                         if isinstance(gen, bundle2.unbundle20):
                             bundle2.applybundle(
                                 repo,
                                 gen,
                                 tr,
                                 source=b"unbundle",
-                                url=b"bundle:" + source,
+                                url=b"bundle:" + path.loc,
                             )
                         else:
-                            gen.apply(repo, b"unbundle", b"bundle:" + source)
+                            gen.apply(repo, b"unbundle", b"bundle:" + path.loc)
                         break
             else:
                 backupdate = encoding.strtolocal(
                     time.strftime(
                         "%a %H:%M, %Y-%m-%d",
-                        time.localtime(os.path.getmtime(source)),
+                        time.localtime(os.path.getmtime(path.loc)),
                     )
                 )
                 ui.status(b"\n%s\n" % (backupdate.ljust(50)))
                 if ui.verbose:
-                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), source))
+                    ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
                 else:
                     opts[
                         b"template"
@@ -4134,8 +3790,21 @@
         ui.writenoi18n(b' revision %s\n' % v[1])
 
 
-@command(b'debugshell', optionalrepo=True)
-def debugshell(ui, repo):
+@command(
+    b'debugshell',
+    [
+        (
+            b'c',
+            b'command',
+            b'',
+            _(b'program passed in as a string'),
+            _(b'COMMAND'),
+        )
+    ],
+    _(b'[-c COMMAND]'),
+    optionalrepo=True,
+)
+def debugshell(ui, repo, **opts):
     """run an interactive Python interpreter
 
     The local namespace is provided with a reference to the ui and
@@ -4148,10 +3817,58 @@
         'repo': repo,
     }
 
+    # py2exe disables initialization of the site module, which is responsible
+    # for arranging for ``quit()`` to exit the interpreter.  Manually initialize
+    # the stuff that site normally does here, so that the interpreter can be
+    # quit in a consistent manner, whether run with pyoxidizer, exewrapper.c,
+    # py.exe, or py2exe.
+    if getattr(sys, "frozen", None) == 'console_exe':
+        try:
+            import site
+
+            site.setcopyright()
+            site.sethelper()
+            site.setquit()
+        except ImportError:
+            site = None  # Keep PyCharm happy
+
+    command = opts.get('command')
+    if command:
+        compiled = code.compile_command(encoding.strfromlocal(command))
+        code.InteractiveInterpreter(locals=imported_objects).runcode(compiled)
+        return
+
     code.interact(local=imported_objects)
 
 
 @command(
+    b'debug-revlog-stats',
+    [
+        (b'c', b'changelog', None, _(b'Display changelog statistics')),
+        (b'm', b'manifest', None, _(b'Display manifest statistics')),
+        (b'f', b'filelogs', None, _(b'Display filelogs statistics')),
+    ]
+    + cmdutil.formatteropts,
+)
+def debug_revlog_stats(ui, repo, **opts):
+    """display statistics about revlogs in the store"""
+    opts = pycompat.byteskwargs(opts)
+    changelog = opts[b"changelog"]
+    manifest = opts[b"manifest"]
+    filelogs = opts[b"filelogs"]
+
+    if changelog is None and manifest is None and filelogs is None:
+        changelog = True
+        manifest = True
+        filelogs = True
+
+    repo = repo.unfiltered()
+    fm = ui.formatter(b'debug-revlog-stats', opts)
+    revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
+    fm.end()
+
+
+@command(
     b'debugsuccessorssets',
     [(b'', b'closest', False, _(b'return closest successors sets only'))],
     _(b'[REV]'),
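
A minimal usage sketch for the two additions above, assuming the option names
declared in their command tables:

    # evaluate a one-off statement with `ui` and `repo` pre-bound (new -c flag)
    $ hg debugshell -c 'ui.write(b"%s\n" % repo.root)'

    # per-revlog statistics; omitting all three selectors implies all of them
    $ hg debug-revlog-stats --changelog --manifest -T json
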
@@ -4843,7 +4560,8 @@
                 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
             )
         else:
-            peer = httppeer.makepeer(ui, path, opener=opener)
+            peer_path = urlutil.try_path(ui, path)
+            peer = httppeer.makepeer(ui, peer_path, opener=opener)
 
         # We /could/ populate stdin/stdout with sock.makefile()...
     else:
--- a/mercurial/diffutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/diffutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -142,7 +142,7 @@
         )
         buildopts[b'ignorewseol'] = get(b'ignore_space_at_eol', b'ignorewseol')
     if formatchanging:
-        buildopts[b'text'] = opts and opts.get(b'text')
+        buildopts[b'text'] = None if opts is None else opts.get(b'text')
         binary = None if opts is None else opts.get(b'binary')
         buildopts[b'nobinary'] = (
             not binary
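
The replaced line fixes a subtle truthiness bug: when ``opts`` is an empty
dict (rather than None), ``opts and opts.get(b'text')`` short-circuits and
evaluates to ``{}`` instead of None. A short illustration of the Python
semantics involved:

    opts = {}
    opts and opts.get(b'text')                   # -> {} ({} is falsy, `and` returns it)
    None if opts is None else opts.get(b'text')  # -> None
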
--- a/mercurial/dirstate.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/dirstate.py	Thu Mar 02 22:45:44 2023 +0100
@@ -27,11 +27,11 @@
     policy,
     pycompat,
     scmutil,
+    txnutil,
     util,
 )
 
 from .dirstateutils import (
-    docket as docketmod,
     timestamp,
 )
 
@@ -43,6 +43,9 @@
 parsers = policy.importmod('parsers')
 rustmod = policy.importrust('dirstate')
 
+# used to detect the absence of a parameter
+SENTINEL = object()
+
 HAS_FAST_DIRSTATE_V2 = rustmod is not None
 
 propertycache = util.propertycache
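
SENTINEL is the usual "missing argument" idiom: unlike None, a fresh
``object()`` cannot collide with any value a caller might legitimately pass
(``setbranch`` below uses it to tell "no transaction given" apart from an
explicit None). A minimal self-contained sketch of the idiom:

    _MISSING = object()  # unique marker, never equal to any caller value

    def func(arg=_MISSING):
        if arg is _MISSING:
            raise TypeError("func() needs an explicit `arg`")
        if arg is None:
            return "caller explicitly passed None"
        return arg
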
@@ -66,10 +69,17 @@
         return obj._join(fname)
 
 
-def requires_parents_change(func):
+def check_invalidated(func):
+    """check that the func is called with a non-invalidated dirstate
+
+    The dirstate is in an "invalidated state" after an error occured during its
+    modification and remains so until we exited the top level scope that framed
+    such change.
+    """
+
     def wrap(self, *args, **kwargs):
-        if not self.pendingparentchange():
-            msg = 'calling `%s` outside of a parentchange context'
+        if self._invalidated_context:
+            msg = 'calling `%s` after the dirstate was invalidated'
             msg %= func.__name__
             raise error.ProgrammingError(msg)
         return func(self, *args, **kwargs)
@@ -77,19 +87,63 @@
     return wrap
 
 
-def requires_no_parents_change(func):
+def requires_changing_parents(func):
     def wrap(self, *args, **kwargs):
-        if self.pendingparentchange():
-            msg = 'calling `%s` inside of a parentchange context'
+        if not self.is_changing_parents:
+            msg = 'calling `%s` outside of a changing_parents context'
+            msg %= func.__name__
+            raise error.ProgrammingError(msg)
+        return func(self, *args, **kwargs)
+
+    return check_invalidated(wrap)
+
+
+def requires_changing_files(func):
+    def wrap(self, *args, **kwargs):
+        if not self.is_changing_files:
+            msg = 'calling `%s` outside of a `changing_files` context'
             msg %= func.__name__
             raise error.ProgrammingError(msg)
         return func(self, *args, **kwargs)
 
-    return wrap
+    return check_invalidated(wrap)
+
+
+def requires_changing_any(func):
+    def wrap(self, *args, **kwargs):
+        if not self.is_changing_any:
+            msg = 'calling `%s` outside of a changing context'
+            msg %= func.__name__
+            raise error.ProgrammingError(msg)
+        return func(self, *args, **kwargs)
+
+    return check_invalidated(wrap)
+
+
+def requires_changing_files_or_status(func):
+    def wrap(self, *args, **kwargs):
+        if not (self.is_changing_files or self._running_status > 0):
+            msg = (
+                'calling `%s` outside of a changing_files '
+                'or running_status context'
+            )
+            msg %= func.__name__
+            raise error.ProgrammingError(msg)
+        return func(self, *args, **kwargs)
+
+    return check_invalidated(wrap)
+
+
+CHANGE_TYPE_PARENTS = "parents"
+CHANGE_TYPE_FILES = "files"
 
 
 @interfaceutil.implementer(intdirstate.idirstate)
 class dirstate:
+
+    # used by largefiles to avoid overwriting the transaction callbacks
+    _tr_key_suffix = b''
+
     def __init__(
         self,
         opener,
@@ -124,7 +178,16 @@
         self._dirty_tracked_set = False
         self._ui = ui
         self._filecache = {}
-        self._parentwriters = 0
+        # nesting level of `changing_parents` context
+        self._changing_level = 0
+        # the change currently underway
+        self._change_type = None
+        # number of open _running_status context
+        self._running_status = 0
+        # True if the current dirstate changing operations have been
+        # invalidated (used to make sure all nested contexts have been exited)
+        self._invalidated_context = False
+        self._attached_to_a_transaction = False
         self._filename = b'dirstate'
         self._filename_th = b'dirstate-tracked-hint'
         self._pendingfilename = b'%s.pending' % self._filename
@@ -136,6 +199,12 @@
         # raises an exception).
         self._cwd
 
+    def refresh(self):
+        if '_branch' in vars(self):
+            del self._branch
+        if '_map' in vars(self) and self._map.may_need_refresh():
+            self.invalidate()
+
     def prefetch_parents(self):
         """make sure the parents are loaded
 
@@ -144,39 +213,193 @@
         self._pl
 
     @contextlib.contextmanager
-    def parentchange(self):
-        """Context manager for handling dirstate parents.
+    @check_invalidated
+    def running_status(self, repo):
+        """Wrap a status operation
+
+        This context is not mutually exclusive with the `changing_*` contexts.
+        It also does not require the `wlock` to be taken.
+
+        If the wlock is taken, this context will behave in a simple way and
+        ensure the data are scheduled for writing when leaving the top level
+        context.
 
-        If an exception occurs in the scope of the context manager,
-        the incoherent dirstate won't be written when wlock is
-        released.
+        If the lock is not taken, it only guarantees that the data are either
+        committed (written) or rolled back (invalidated) when exiting the top
+        level context. The write/invalidate action must be performed by the
+        wrapped code.
+
+
+        The expected logic is:
+
+        A: read the dirstate
+        B: run status
+           This might make the dirstate dirty by updating caches,
+           especially in Rust.
+        C: do more "post status" fixups if relevant
+        D: try to take the w-lock (this will invalidate the changes if we raced)
+        E0: if dirstate changed on disk → discard the changes (done by dirstate internals)
+        E1: elif lock was acquired → write the changes
+        E2: else → discard the changes
         """
-        self._parentwriters += 1
-        yield
-        # Typically we want the "undo" step of a context manager in a
-        # finally block so it happens even when an exception
-        # occurs. In this case, however, we only want to decrement
-        # parentwriters if the code in the with statement exits
-        # normally, so we don't have a try/finally here on purpose.
-        self._parentwriters -= 1
+        has_lock = repo.currentwlock() is not None
+        is_changing = self.is_changing_any
+        tr = repo.currenttransaction()
+        has_tr = tr is not None
+        nested = bool(self._running_status)
+
+        first_and_alone = not (is_changing or has_tr or nested)
+
+        # enforce no change happened outside of a proper context.
+        if first_and_alone and self._dirty:
+            has_tr = repo.currenttransaction() is not None
+            if not has_tr and self._changing_level == 0 and self._dirty:
+                msg = "entering a status context, but dirstate is already dirty"
+                raise error.ProgrammingError(msg)
+
+        should_write = has_lock and not (nested or is_changing)
+
+        self._running_status += 1
+        try:
+            yield
+        except Exception:
+            self.invalidate()
+            raise
+        finally:
+            self._running_status -= 1
+            if self._invalidated_context:
+                should_write = False
+                self.invalidate()
+
+        if should_write:
+            assert repo.currenttransaction() is tr
+            self.write(tr)
+        elif not has_lock:
+            if self._dirty:
+                msg = b'dirstate dirty while exiting an isolated status context'
+                repo.ui.develwarn(msg)
+                self.invalidate()
+
+    @contextlib.contextmanager
+    @check_invalidated
+    def _changing(self, repo, change_type):
+        if repo.currentwlock() is None:
+            msg = b"trying to change the dirstate without holding the wlock"
+            raise error.ProgrammingError(msg)
+
+        has_tr = repo.currenttransaction() is not None
+        if not has_tr and self._changing_level == 0 and self._dirty:
+            msg = b"entering a changing context, but dirstate is already dirty"
+            repo.ui.develwarn(msg)
+
+        assert self._changing_level >= 0
+        # different type of change are mutually exclusive
+        if self._change_type is None:
+            assert self._changing_level == 0
+            self._change_type = change_type
+        elif self._change_type != change_type:
+            msg = (
+                'trying to open "%s" dirstate-changing context while a "%s" is'
+                ' already open'
+            )
+            msg %= (change_type, self._change_type)
+            raise error.ProgrammingError(msg)
+        should_write = False
+        self._changing_level += 1
+        try:
+            yield
+        except:  # re-raises
+            self.invalidate()  # this will set `_invalidated_context`
+            raise
+        finally:
+            assert self._changing_level > 0
+            self._changing_level -= 1
+            # If the dirstate is being invalidated, call invalidate again.
+            # This will throw away anything added by an upper context and
+            # reset the `_invalidated_context` flag when relevant.
+            if self._changing_level <= 0:
+                self._change_type = None
+                assert self._changing_level == 0
+            if self._invalidated_context:
+                # make sure we invalidate anything an upper context might
+                # have changed.
+                self.invalidate()
+            else:
+                should_write = self._changing_level <= 0
+        tr = repo.currenttransaction()
+        if has_tr != (tr is not None):
+            if has_tr:
+                m = "transaction vanished while changing dirstate"
+            else:
+                m = "transaction appeared while changing dirstate"
+            raise error.ProgrammingError(m)
+        if should_write:
+            self.write(tr)
+
+    @contextlib.contextmanager
+    def changing_parents(self, repo):
+        with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
+            yield c
+
+    @contextlib.contextmanager
+    def changing_files(self, repo):
+        with self._changing(repo, CHANGE_TYPE_FILES) as c:
+            yield c
+
+    # here to help migration to the new code
+    def parentchange(self):
+        msg = (
+            "Mercurial 6.4 and later requires call to "
+            "`dirstate.changing_parents(repo)`"
+        )
+        raise error.ProgrammingError(msg)
+
+    @property
+    def is_changing_any(self):
+        """Returns true if the dirstate is in the middle of a set of changes.
+
+        This returns True for any kind of change.
+        """
+        return self._changing_level > 0
 
     def pendingparentchange(self):
+        return self.is_changing_parent()
+
+    def is_changing_parent(self):
         """Returns true if the dirstate is in the middle of a set of changes
         that modify the dirstate parent.
         """
-        return self._parentwriters > 0
+        self._ui.deprecwarn(b"dirstate.is_changing_parents", b"6.5")
+        return self.is_changing_parents
+
+    @property
+    def is_changing_parents(self):
+        """Returns true if the dirstate is in the middle of a set of changes
+        that modify the dirstate parent.
+        """
+        if self._changing_level <= 0:
+            return False
+        return self._change_type == CHANGE_TYPE_PARENTS
+
+    @property
+    def is_changing_files(self):
+        """Returns true if the dirstate is in the middle of a set of changes
+        that modify the files tracked or their sources.
+        """
+        if self._changing_level <= 0:
+            return False
+        return self._change_type == CHANGE_TYPE_FILES
 
     @propertycache
     def _map(self):
         """Return the dirstate contents (see documentation for dirstatemap)."""
-        self._map = self._mapcls(
+        return self._mapcls(
             self._ui,
             self._opener,
             self._root,
             self._nodeconstants,
             self._use_dirstate_v2,
         )
-        return self._map
 
     @property
     def _sparsematcher(self):
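
A hedged migration sketch for callers of the removed ``parentchange()``
context, assuming a ``repo`` that can take the wlock; ``p1``, ``p2`` and the
file name are placeholders:

    with repo.wlock():
        # moving the working-copy parents (commit, update, rebase, ...)
        with repo.dirstate.changing_parents(repo):
            repo.dirstate.setparents(p1, p2)
        # adding or removing tracked files only
        with repo.dirstate.changing_files(repo):
            repo.dirstate.set_tracked(b'some-file')

    # status runs get their own wrapper, which tolerates a missing wlock
    with repo.dirstate.running_status(repo):
        pass  # run status and post-status fixups here
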
@@ -197,10 +420,19 @@
 
     @repocache(b'branch')
     def _branch(self):
+        f = None
+        data = b''
         try:
-            return self._opener.read(b"branch").strip() or b"default"
+            f, mode = txnutil.trypending(self._root, self._opener, b'branch')
+            data = f.read().strip()
         except FileNotFoundError:
+            pass
+        finally:
+            if f is not None:
+                f.close()
+        if not data:
             return b"default"
+        return data
 
     @property
     def _pl(self):
@@ -365,6 +597,7 @@
     def branch(self):
         return encoding.tolocal(self._branch)
 
+    @requires_changing_parents
     def setparents(self, p1, p2=None):
         """Set dirstate parents to p1 and p2.
 
@@ -376,10 +609,10 @@
         """
         if p2 is None:
             p2 = self._nodeconstants.nullid
-        if self._parentwriters == 0:
+        if self._changing_level == 0:
             raise ValueError(
                 b"cannot set dirstate parent outside of "
-                b"dirstate.parentchange context manager"
+                b"dirstate.changing_parents context manager"
             )
 
         self._dirty = True
@@ -391,21 +624,37 @@
         fold_p2 = oldp2 != nullid and p2 == nullid
         return self._map.setparents(p1, p2, fold_p2=fold_p2)
 
-    def setbranch(self, branch):
+    def setbranch(self, branch, transaction=SENTINEL):
         self.__class__._branch.set(self, encoding.fromlocal(branch))
-        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
-        try:
-            f.write(self._branch + b'\n')
-            f.close()
+        if transaction is SENTINEL:
+            msg = b"setbranch needs a `transaction` argument"
+            self._ui.deprecwarn(msg, b'6.5')
+            transaction = None
+        if transaction is not None:
+            self._setup_tr_abort(transaction)
+            transaction.addfilegenerator(
+                b'dirstate-3-branch%s' % self._tr_key_suffix,
+                (b'branch',),
+                self._write_branch,
+                location=b'plain',
+                post_finalize=True,
+            )
+            return
 
+        vfs = self._opener
+        with vfs(b'branch', b'w', atomictemp=True, checkambig=True) as f:
+            self._write_branch(f)
             # make sure filecache has the correct stat info for _branch after
             # replacing the underlying file
+            #
+            # XXX do we actually need this?
+            # refreshing the attribute is quite cheap
             ce = self._filecache[b'_branch']
             if ce:
                 ce.refresh()
-        except:  # re-raises
-            f.discard()
-            raise
+
+    def _write_branch(self, file_obj):
+        file_obj.write(self._branch + b'\n')
 
     def invalidate(self):
         """Causes the next access to reread the dirstate.
@@ -419,9 +668,14 @@
                 delattr(self, a)
         self._dirty = False
         self._dirty_tracked_set = False
-        self._parentwriters = 0
+        self._invalidated_context = bool(
+            self._changing_level > 0
+            or self._attached_to_a_transaction
+            or self._running_status
+        )
         self._origpl = None
 
+    @requires_changing_any
     def copy(self, source, dest):
         """Mark dest as a copy of source. Unmark dest if source is None."""
         if source == dest:
@@ -439,7 +693,7 @@
     def copies(self):
         return self._map.copymap
 
-    @requires_no_parents_change
+    @requires_changing_files
     def set_tracked(self, filename, reset_copy=False):
         """a "public" method for generic code to mark a file as tracked
 
@@ -461,7 +715,7 @@
             self._dirty_tracked_set = True
         return pre_tracked
 
-    @requires_no_parents_change
+    @requires_changing_files
     def set_untracked(self, filename):
         """a "public" method for generic code to mark a file as untracked
 
@@ -476,7 +730,7 @@
             self._dirty_tracked_set = True
         return ret
 
-    @requires_no_parents_change
+    @requires_changing_files_or_status
     def set_clean(self, filename, parentfiledata):
         """record that the current state of the file on disk is known to be clean"""
         self._dirty = True
@@ -485,13 +739,13 @@
         (mode, size, mtime) = parentfiledata
         self._map.set_clean(filename, mode, size, mtime)
 
-    @requires_no_parents_change
+    @requires_changing_files_or_status
     def set_possibly_dirty(self, filename):
         """record that the current state of the file on disk is unknown"""
         self._dirty = True
         self._map.set_possibly_dirty(filename)
 
-    @requires_parents_change
+    @requires_changing_parents
     def update_file_p1(
         self,
         filename,
@@ -503,7 +757,7 @@
         rewriting operation.
 
         It should not be called during a merge (p2 != nullid) and only within
-        a `with dirstate.parentchange():` context.
+        a `with dirstate.changing_parents(repo):` context.
         """
         if self.in_merge:
             msg = b'update_file_reference should not be called when merging'
@@ -531,7 +785,7 @@
             has_meaningful_mtime=False,
         )
 
-    @requires_parents_change
+    @requires_changing_parents
     def update_file(
         self,
         filename,
@@ -546,12 +800,57 @@
         This is to be called when the dirstate's parent changes to keep track
         of the file situation with regard to the working copy and its parent.
 
-        This function must be called within a `dirstate.parentchange` context.
+        This function must be called within a `dirstate.changing_parents` context.
 
         note: the API is at an early stage and we might need to adjust it
         depending of what information ends up being relevant and useful to
         other processing.
         """
+        self._update_file(
+            filename=filename,
+            wc_tracked=wc_tracked,
+            p1_tracked=p1_tracked,
+            p2_info=p2_info,
+            possibly_dirty=possibly_dirty,
+            parentfiledata=parentfiledata,
+        )
+
+    def hacky_extension_update_file(self, *args, **kwargs):
+        """NEVER USE THIS, YOU DO NOT NEED IT
+
+        This function is a variant of "update_file" to be called by a small set
+        of extensions, it also adjust the internal state of file, but can be
+        called outside an `changing_parents` context.
+
+        A very small number of extensions meddle with the working copy content
+        in a way that requires the dirstate to be adjusted accordingly. At the
+        time this is written they are:
+        - keyword,
+        - largefile,
+        PLEASE DO NOT GROW THIS LIST ANY FURTHER.
+
+        This function could probably be replaced by a more semantic one (like
+        "adjust expected size" or "always revalidate file content", etc.);
+        however, at the time this is written, that is too much of a detour
+        to be considered.
+        """
+        if not (self._changing_level > 0 or self._running_status > 0):
+            msg = "requires a changes context"
+            raise error.ProgrammingError(msg)
+        self._update_file(
+            *args,
+            **kwargs,
+        )
+
+    def _update_file(
+        self,
+        filename,
+        wc_tracked,
+        p1_tracked,
+        p2_info=False,
+        possibly_dirty=False,
+        parentfiledata=None,
+    ):
 
         # note: I do not think we need to double check name clash here since we
         # are in a update/merge case that should already have taken care of
@@ -680,12 +979,16 @@
             return self._normalize(path, isknown, ignoremissing)
         return path
 
+    # XXX this method is barely used; as a result:
+    # - its semantics are unclear
+    # - do we really need it?
+    @requires_changing_parents
     def clear(self):
         self._map.clear()
         self._dirty = True
 
+    @requires_changing_parents
     def rebuild(self, parent, allfiles, changedfiles=None):
-
         matcher = self._sparsematcher
         if matcher is not None and not matcher.always():
             # should not add non-matching files
@@ -724,7 +1027,6 @@
         self._map.setparents(parent, self._nodeconstants.nullid)
 
         for f in to_lookup:
-
             if self.in_merge:
                 self.set_tracked(f)
             else:
@@ -738,31 +1040,48 @@
 
         self._dirty = True
 
-    def identity(self):
-        """Return identity of dirstate itself to detect changing in storage
+    def _setup_tr_abort(self, tr):
+        """make sure we invalidate the current change on abort"""
+        if tr is None:
+            return
 
-        If identity of previous dirstate is equal to this, writing
-        changes based on the former dirstate out can keep consistency.
-        """
-        return self._map.identity
+        def on_abort(tr):
+            self._attached_to_a_transaction = False
+            self.invalidate()
+
+        tr.addabort(
+            b'dirstate-invalidate%s' % self._tr_key_suffix,
+            on_abort,
+        )
 
     def write(self, tr):
         if not self._dirty:
             return
+        # make sure we don't request a write of invalidated content
+        # XXX move before the dirty check once `unlock` stops calling `write`
+        assert not self._invalidated_context
 
         write_key = self._use_tracked_hint and self._dirty_tracked_set
         if tr:
+
+            self._setup_tr_abort(tr)
+            self._attached_to_a_transaction = True
+
+            def on_success(f):
+                self._attached_to_a_transaction = False
+                self._writedirstate(tr, f)
+
             # delay writing in-memory changes out
             tr.addfilegenerator(
-                b'dirstate-1-main',
+                b'dirstate-1-main%s' % self._tr_key_suffix,
                 (self._filename,),
-                lambda f: self._writedirstate(tr, f),
+                on_success,
                 location=b'plain',
                 post_finalize=True,
             )
             if write_key:
                 tr.addfilegenerator(
-                    b'dirstate-2-key-post',
+                    b'dirstate-2-key-post%s' % self._tr_key_suffix,
                     (self._filename_th,),
                     lambda f: self._write_tracked_hint(tr, f),
                     location=b'plain',
@@ -798,6 +1117,8 @@
         self._plchangecallbacks[category] = callback
 
     def _writedirstate(self, tr, st):
+        # make sure we don't write invalidated content
+        assert not self._invalidated_context
         # notify callbacks about parents change
         if self._origpl is not None and self._origpl != self._pl:
             for c, callback in sorted(self._plchangecallbacks.items()):
@@ -936,7 +1257,8 @@
                     badfn(ff, badtype(kind))
                     if nf in dmap:
                         results[nf] = None
-            except OSError as inst:  # nf not found on disk - it is dirstate only
+            except OSError as inst:
+                # nf not found on disk - it is dirstate only
                 if nf in dmap:  # does it exactly match a missing file?
                     results[nf] = None
                 else:  # does it match a missing directory?
@@ -1246,7 +1568,7 @@
                         )
                     )
 
-        for (fn, message) in bad:
+        for fn, message in bad:
             matcher.bad(fn, encoding.strtolocal(message))
 
         status = scmutil.status(
@@ -1276,6 +1598,9 @@
             files that have definitely not been modified since the
             dirstate was written
         """
+        if not self._running_status:
+            msg = "Calling `status` outside a `running_status` context"
+            raise error.ProgrammingError(msg)
         listignored, listclean, listunknown = ignored, clean, unknown
         lookup, modified, added, unknown, ignored = [], [], [], [], []
         removed, deleted, clean = [], [], []
@@ -1435,142 +1760,43 @@
         else:
             return self._filename
 
-    def data_backup_filename(self, backupname):
-        if not self._use_dirstate_v2:
-            return None
-        return backupname + b'.v2-data'
-
-    def _new_backup_data_filename(self, backupname):
-        """return a filename to backup a data-file or None"""
-        if not self._use_dirstate_v2:
-            return None
-        if self._map.docket.uuid is None:
-            # not created yet, nothing to backup
-            return None
-        data_filename = self._map.docket.data_filename()
-        return data_filename, self.data_backup_filename(backupname)
-
-    def backup_data_file(self, backupname):
-        if not self._use_dirstate_v2:
-            return None
-        docket = docketmod.DirstateDocket.parse(
-            self._opener.read(backupname),
-            self._nodeconstants,
-        )
-        return self.data_backup_filename(backupname), docket.data_filename()
-
-    def savebackup(self, tr, backupname):
-        '''Save current dirstate into backup file'''
-        filename = self._actualfilename(tr)
-        assert backupname != filename
+    def all_file_names(self):
+        """list all filename currently used by this dirstate
 
-        # use '_writedirstate' instead of 'write' to write changes certainly,
-        # because the latter omits writing out if transaction is running.
-        # output file will be used to create backup of dirstate at this point.
-        if self._dirty or not self._opener.exists(filename):
-            self._writedirstate(
-                tr,
-                self._opener(filename, b"w", atomictemp=True, checkambig=True),
-            )
+        This is only used for `hg rollback`-related backups in the transaction
+        """
+        files = [b'branch']
+        if self._opener.exists(self._filename):
+            files.append(self._filename)
+            if self._use_dirstate_v2:
+                files.append(self._map.docket.data_filename())
+        return tuple(files)
 
-        if tr:
-            # ensure that subsequent tr.writepending returns True for
-            # changes written out above, even if dirstate is never
-            # changed after this
-            tr.addfilegenerator(
-                b'dirstate-1-main',
-                (self._filename,),
-                lambda f: self._writedirstate(tr, f),
-                location=b'plain',
-                post_finalize=True,
-            )
-
-            # ensure that pending file written above is unlinked at
-            # failure, even if tr.writepending isn't invoked until the
-            # end of this transaction
-            tr.registertmp(filename, location=b'plain')
-
-        self._opener.tryunlink(backupname)
-        # hardlink backup is okay because _writedirstate is always called
-        # with an "atomictemp=True" file.
-        util.copyfile(
-            self._opener.join(filename),
-            self._opener.join(backupname),
-            hardlink=True,
+    def verify(self, m1, m2, p1, narrow_matcher=None):
+        """
+        check the dirstate contents against the parent manifest and yield errors
+        """
+        missing_from_p1 = _(
+            b"%s marked as tracked in p1 (%s) but not in manifest1\n"
         )
-        data_pair = self._new_backup_data_filename(backupname)
-        if data_pair is not None:
-            data_filename, bck_data_filename = data_pair
-            util.copyfile(
-                self._opener.join(data_filename),
-                self._opener.join(bck_data_filename),
-                hardlink=True,
-            )
-            if tr is not None:
-                # ensure that pending file written above is unlinked at
-                # failure, even if tr.writepending isn't invoked until the
-                # end of this transaction
-                tr.registertmp(bck_data_filename, location=b'plain')
-
-    def restorebackup(self, tr, backupname):
-        '''Restore dirstate by backup file'''
-        # this "invalidate()" prevents "wlock.release()" from writing
-        # changes of dirstate out after restoring from backup file
-        self.invalidate()
-        o = self._opener
-        if not o.exists(backupname):
-            # there was no file backup, delete existing files
-            filename = self._actualfilename(tr)
-            data_file = None
-            if self._use_dirstate_v2 and self._map.docket.uuid is not None:
-                data_file = self._map.docket.data_filename()
-            if o.exists(filename):
-                o.unlink(filename)
-            if data_file is not None and o.exists(data_file):
-                o.unlink(data_file)
-            return
-        filename = self._actualfilename(tr)
-        data_pair = self.backup_data_file(backupname)
-        if o.exists(filename) and util.samefile(
-            o.join(backupname), o.join(filename)
-        ):
-            o.unlink(backupname)
-        else:
-            o.rename(backupname, filename, checkambig=True)
-
-        if data_pair is not None:
-            data_backup, target = data_pair
-            if o.exists(target) and util.samefile(
-                o.join(data_backup), o.join(target)
-            ):
-                o.unlink(data_backup)
-            else:
-                o.rename(data_backup, target, checkambig=True)
-
-    def clearbackup(self, tr, backupname):
-        '''Clear backup file'''
-        o = self._opener
-        if o.exists(backupname):
-            data_backup = self.backup_data_file(backupname)
-            o.unlink(backupname)
-            if data_backup is not None:
-                o.unlink(data_backup[0])
-
-    def verify(self, m1, m2):
-        """check the dirstate content again the parent manifest and yield errors"""
-        missing_from_p1 = b"%s in state %s, but not in manifest1\n"
-        unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
-        missing_from_ps = b"%s in state %s, but not in either manifest\n"
-        missing_from_ds = b"%s in manifest1, but listed as state %s\n"
+        unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
+        missing_from_ps = _(
+            b"%s marked as modified, but not in either manifest\n"
+        )
+        missing_from_ds = _(
+            b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
+        )
         for f, entry in self.items():
-            state = entry.state
-            if state in b"nr" and f not in m1:
-                yield (missing_from_p1, f, state)
-            if state in b"a" and f in m1:
-                yield (unexpected_in_p1, f, state)
-            if state in b"m" and f not in m1 and f not in m2:
-                yield (missing_from_ps, f, state)
+            if entry.p1_tracked:
+                if entry.modified and f not in m1 and f not in m2:
+                    yield missing_from_ps % f
+                elif f not in m1:
+                    yield missing_from_p1 % (f, node.short(p1))
+            if entry.added and f in m1:
+                yield unexpected_in_p1 % f
         for f in m1:
-            state = self.get_entry(f).state
-            if state not in b"nrm":
-                yield (missing_from_ds, f, state)
+            if narrow_matcher is not None and not narrow_matcher(f):
+                continue
+            entry = self.get_entry(f)
+            if not entry.p1_tracked:
+                yield missing_from_ds % (f, node.short(p1))
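
``verify`` now yields fully formatted byte strings instead of
(template, field, ...) tuples, so callers can print them directly. A hedged
caller sketch, assuming ``m1``/``m2`` are the parent manifests and ``p1`` the
first parent node:

    for msg in repo.dirstate.verify(m1, m2, p1, narrow_matcher=None):
        ui.warn(msg)
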
--- a/mercurial/dirstateguard.py	Thu Mar 02 15:21:36 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,96 +0,0 @@
-# dirstateguard.py - class to allow restoring dirstate after failure
-#
-# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-
-import os
-from .i18n import _
-
-from . import (
-    error,
-    narrowspec,
-    requirements,
-    util,
-)
-
-
-class dirstateguard(util.transactional):
-    """Restore dirstate at unexpected failure.
-
-    At the construction, this class does:
-
-    - write current ``repo.dirstate`` out, and
-    - save ``.hg/dirstate`` into the backup file
-
-    This restores ``.hg/dirstate`` from backup file, if ``release()``
-    is invoked before ``close()``.
-
-    This just removes the backup file at ``close()`` before ``release()``.
-    """
-
-    def __init__(self, repo, name):
-        self._repo = repo
-        self._active = False
-        self._closed = False
-
-        def getname(prefix):
-            fd, fname = repo.vfs.mkstemp(prefix=prefix)
-            os.close(fd)
-            return fname
-
-        self._backupname = getname(b'dirstate.backup.%s.' % name)
-        repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
-        # Don't make this the empty string, things may join it with stuff and
-        # blindly try to unlink it, which could be bad.
-        self._narrowspecbackupname = None
-        if requirements.NARROW_REQUIREMENT in repo.requirements:
-            self._narrowspecbackupname = getname(
-                b'narrowspec.backup.%s.' % name
-            )
-            narrowspec.savewcbackup(repo, self._narrowspecbackupname)
-        self._active = True
-
-    def __del__(self):
-        if self._active:  # still active
-            # this may occur, even if this class is used correctly:
-            # for example, releasing other resources like transaction
-            # may raise exception before ``dirstateguard.release`` in
-            # ``release(tr, ....)``.
-            self._abort()
-
-    def close(self):
-        if not self._active:  # already inactivated
-            msg = (
-                _(b"can't close already inactivated backup: %s")
-                % self._backupname
-            )
-            raise error.Abort(msg)
-
-        self._repo.dirstate.clearbackup(
-            self._repo.currenttransaction(), self._backupname
-        )
-        if self._narrowspecbackupname:
-            narrowspec.clearwcbackup(self._repo, self._narrowspecbackupname)
-        self._active = False
-        self._closed = True
-
-    def _abort(self):
-        if self._narrowspecbackupname:
-            narrowspec.restorewcbackup(self._repo, self._narrowspecbackupname)
-        self._repo.dirstate.restorebackup(
-            self._repo.currenttransaction(), self._backupname
-        )
-        self._active = False
-
-    def release(self):
-        if not self._closed:
-            if not self._active:  # already inactivated
-                msg = (
-                    _(b"can't release already inactivated backup: %s")
-                    % self._backupname
-                )
-                raise error.Abort(msg)
-            self._abort()
--- a/mercurial/dirstatemap.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/dirstatemap.py	Thu Mar 02 22:45:44 2023 +0100
@@ -77,9 +77,32 @@
         self._pendingmode = None
 
     def _set_identity(self):
-        # ignore HG_PENDING because identity is used only for writing
-        file_path = self._opener.join(self._filename)
-        self.identity = util.filestat.frompath(file_path)
+        self.identity = self._get_current_identity()
+
+    def _get_current_identity(self):
+        try:
+            return util.cachestat(self._opener.join(self._filename))
+        except FileNotFoundError:
+            return None
+
+    def may_need_refresh(self):
+        if 'identity' not in vars(self):
+            # no existing identity, we need a refresh
+            return True
+        if self.identity is None:
+            return True
+        if not self.identity.cacheable():
+            # We cannot trust the entry
+            # XXX this is a problem on Windows, NFS, and other inode-less systems
+            return True
+        current_identity = self._get_current_identity()
+        if current_identity is None:
+            return True
+        if not current_identity.cacheable():
+            # We cannot trust the entry
+            # XXX this is a problem on Windows, NFS, and other inode-less systems
+            return True
+        return current_identity != self.identity
 
     def preload(self):
         """Loads the underlying data, if it's not already loaded"""
@@ -161,6 +184,9 @@
             raise error.ProgrammingError(b'dirstate docket name collision')
         data_filename = new_docket.data_filename()
         self._opener.write(data_filename, packed)
+        # tell the transaction that we are adding a new file
+        if tr is not None:
+            tr.addbackup(data_filename, location=b'plain')
         # Write the new docket after the new data file has been
         # written. Because `st` was opened with `atomictemp=True`,
         # the actual `.hg/dirstate` file is only affected on close.
@@ -170,6 +196,8 @@
         # the new data file was written.
         if old_docket.uuid:
             data_filename = old_docket.data_filename()
+            if tr is not None:
+                tr.addbackup(data_filename, location=b'plain')
             unlink = lambda _tr=None: self._opener.unlink(data_filename)
             if tr:
                 category = b"dirstate-v2-clean-" + old_docket.uuid
@@ -676,6 +704,14 @@
             if append:
                 docket = self.docket
                 data_filename = docket.data_filename()
+                # We mark it for backup to make sure a future `hg rollback` (or
+                # `hg recover`?) call finds the data it needs to restore a
+                # working repository.
+                #
+                # The backup can use a hardlink because the format is resistant
+                # to trailing "dead" data.
+                if tr is not None:
+                    tr.addbackup(data_filename, location=b'plain')
                 with self._opener(data_filename, b'r+b') as fp:
                     fp.seek(docket.data_size)
                     assert fp.tell() == docket.data_size
--- a/mercurial/dispatch.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/dispatch.py	Thu Mar 02 22:45:44 2023 +0100
@@ -980,7 +980,8 @@
             lui.readconfig(os.path.join(path, b".hg", b"hgrc-not-shared"), path)
 
     if rpath:
-        path = urlutil.get_clone_path(lui, rpath)[0]
+        path_obj = urlutil.get_clone_path_obj(lui, rpath)
+        path = path_obj.rawloc
         lui = ui.copy()
         if rcutil.use_repo_hgrc():
             _readsharedsourceconfig(lui, path)
--- a/mercurial/exchange.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/exchange.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1183,7 +1183,12 @@
             trgetter = None
             if pushback:
                 trgetter = pushop.trmanager.transaction
-            op = bundle2.processbundle(pushop.repo, reply, trgetter)
+            op = bundle2.processbundle(
+                pushop.repo,
+                reply,
+                trgetter,
+                remote=pushop.remote,
+            )
         except error.BundleValueError as exc:
             raise error.RemoteError(_(b'missing support for %s') % exc)
         except bundle2.AbortFromPart as exc:
@@ -1903,10 +1908,18 @@
 
         try:
             op = bundle2.bundleoperation(
-                pullop.repo, pullop.gettransaction, source=b'pull'
+                pullop.repo,
+                pullop.gettransaction,
+                source=b'pull',
+                remote=pullop.remote,
             )
             op.modes[b'bookmarks'] = b'records'
-            bundle2.processbundle(pullop.repo, bundle, op=op)
+            bundle2.processbundle(
+                pullop.repo,
+                bundle,
+                op=op,
+                remote=pullop.remote,
+            )
         except bundle2.AbortFromPart as exc:
             pullop.repo.ui.error(_(b'remote: abort: %s\n') % exc)
             raise error.RemoteError(_(b'pull failed on remote'), hint=exc.hint)
@@ -1995,7 +2008,12 @@
             ).result()
 
     bundleop = bundle2.applybundle(
-        pullop.repo, cg, tr, b'pull', pullop.remote.url()
+        pullop.repo,
+        cg,
+        tr,
+        b'pull',
+        pullop.remote.url(),
+        remote=pullop.remote,
     )
     pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
 
--- a/mercurial/filelog.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/filelog.py	Thu Mar 02 22:45:44 2023 +0100
@@ -111,6 +111,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         return self._revlog.emitrevisions(
             nodes,
@@ -119,6 +120,7 @@
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
     def addrevision(
@@ -151,6 +153,8 @@
         addrevisioncb=None,
         duplicaterevisioncb=None,
         maybemissingparents=False,
+        debug_info=None,
+        delta_base_reuse_policy=None,
     ):
         if maybemissingparents:
             raise error.Abort(
@@ -171,6 +175,8 @@
                 transaction,
                 addrevisioncb=addrevisioncb,
                 duplicaterevisioncb=duplicaterevisioncb,
+                debug_info=debug_info,
+                delta_base_reuse_policy=delta_base_reuse_policy,
             )
 
     def getstrippoint(self, minlink):
--- a/mercurial/filemerge.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/filemerge.py	Thu Mar 02 22:45:44 2023 +0100
@@ -158,7 +158,7 @@
             continue
         p = util.lookupreg(k, _toolstr(ui, tool, b"regname"))
         if p:
-            p = procutil.findexe(p + _toolstr(ui, tool, b"regappend", b""))
+            p = procutil.findexe(p + _toolstr(ui, tool, b"regappend"))
             if p:
                 return p
     exe = _toolstr(ui, tool, b"executable", tool)
@@ -478,8 +478,9 @@
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will fail if there are any conflicts and leave markers in
-    the partially merged file. Markers will have two sections, one for each side
-    of merge, unless mode equals 'union' which suppresses the markers."""
+    the partially merged file. Markers will have two sections, one for each
+    side of merge, unless mode equals 'union' or 'union-other-first' which
+    suppresses the markers."""
     ui = repo.ui
 
     try:
@@ -510,12 +511,28 @@
 def _iunion(repo, mynode, local, other, base, toolconf, backup):
     """
     Uses the internal non-interactive simple merge algorithm for merging
-    files. It will use both left and right sides for conflict regions.
+    files. It will use both local and other sides for conflict regions by
+    adding local on top of other.
     No markers are inserted."""
     return _merge(repo, local, other, base, b'union')
 
 
 @internaltool(
+    b'union-other-first',
+    fullmerge,
+    _(
+        b"warning: conflicts while merging %s! "
+        b"(edit, then use 'hg resolve --mark')\n"
+    ),
+    precheck=_mergecheck,
+)
+def _iunion_other_first(repo, mynode, local, other, base, toolconf, backup):
+    """
+    Like :union, but adds other on top of local."""
+    return _merge(repo, local, other, base, b'union-other-first')
+
+
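
A usage sketch for the new internal merge tool; like all internal tools it is
addressed with a leading colon:

    # keep both sides of each conflict region, other side first
    $ hg resolve --tool :union-other-first --all

    # or select it per file pattern in the configuration:
    # [merge-patterns]
    # **.txt = :union-other-first
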
+@internaltool(
     b'merge',
     fullmerge,
     _(
--- a/mercurial/help.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/help.py	Thu Mar 02 22:45:44 2023 +0100
@@ -10,6 +10,18 @@
 import re
 import textwrap
 
+from typing import (
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    Union,
+    cast,
+)
+
 from .i18n import (
     _,
     gettext,
@@ -40,7 +52,16 @@
     stringutil,
 )
 
-_exclkeywords = {
+_DocLoader = Callable[[uimod.ui], bytes]
+# Old extensions may not register with a category
+_HelpEntry = Union["_HelpEntryNoCategory", "_HelpEntryWithCategory"]
+_HelpEntryNoCategory = Tuple[List[bytes], bytes, _DocLoader]
+_HelpEntryWithCategory = Tuple[List[bytes], bytes, _DocLoader, bytes]
+_SelectFn = Callable[[object], bool]
+_SynonymTable = Dict[bytes, List[bytes]]
+_TopicHook = Callable[[uimod.ui, bytes, bytes], bytes]
+
+_exclkeywords: Set[bytes] = {
     b"(ADVANCED)",
     b"(DEPRECATED)",
     b"(EXPERIMENTAL)",
@@ -56,7 +77,7 @@
 # Extensions with custom categories should insert them into this list
 # after/before the appropriate item, rather than replacing the list or
 # assuming absolute positions.
-CATEGORY_ORDER = [
+CATEGORY_ORDER: List[bytes] = [
     registrar.command.CATEGORY_REPO_CREATION,
     registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT,
     registrar.command.CATEGORY_COMMITTING,
@@ -74,7 +95,7 @@
 
 # Human-readable category names. These are translated.
 # Extensions with custom categories should add their names here.
-CATEGORY_NAMES = {
+CATEGORY_NAMES: Dict[bytes, bytes] = {
     registrar.command.CATEGORY_REPO_CREATION: b'Repository creation',
     registrar.command.CATEGORY_REMOTE_REPO_MANAGEMENT: b'Remote repository management',
     registrar.command.CATEGORY_COMMITTING: b'Change creation',
@@ -102,7 +123,7 @@
 # Extensions with custom categories should insert them into this list
 # after/before the appropriate item, rather than replacing the list or
 # assuming absolute positions.
-TOPIC_CATEGORY_ORDER = [
+TOPIC_CATEGORY_ORDER: List[bytes] = [
     TOPIC_CATEGORY_IDS,
     TOPIC_CATEGORY_OUTPUT,
     TOPIC_CATEGORY_CONFIG,
@@ -112,7 +133,7 @@
 ]
 
 # Human-readable topic category names. These are translated.
-TOPIC_CATEGORY_NAMES = {
+TOPIC_CATEGORY_NAMES: Dict[bytes, bytes] = {
     TOPIC_CATEGORY_IDS: b'Mercurial identifiers',
     TOPIC_CATEGORY_OUTPUT: b'Mercurial output',
     TOPIC_CATEGORY_CONFIG: b'Mercurial configuration',
@@ -122,7 +143,12 @@
 }
 
 
-def listexts(header, exts, indent=1, showdeprecated=False):
+def listexts(
+    header: bytes,
+    exts: Dict[bytes, bytes],
+    indent: int = 1,
+    showdeprecated: bool = False,
+) -> List[bytes]:
     '''return a text listing of the given extensions'''
     rst = []
     if exts:
@@ -135,7 +161,7 @@
     return rst
 
 
-def extshelp(ui):
+def extshelp(ui: uimod.ui) -> bytes:
     rst = loaddoc(b'extensions')(ui).splitlines(True)
     rst.extend(
         listexts(
@@ -153,7 +179,7 @@
     return doc
 
 
-def parsedefaultmarker(text):
+def parsedefaultmarker(text: bytes) -> Optional[Tuple[bytes, List[bytes]]]:
     """given a text 'abc (DEFAULT: def.ghi)',
     returns (b'abc', (b'def', b'ghi')). Otherwise return None"""
     if text[-1:] == b')':
@@ -164,7 +190,7 @@
             return text[:pos], item.split(b'.', 2)
 
 
-def optrst(header, options, verbose, ui):
+def optrst(header: bytes, options, verbose: bool, ui: uimod.ui) -> bytes:
     data = []
     multioccur = False
     for option in options:
@@ -220,13 +246,15 @@
     return b''.join(rst)
 
 
-def indicateomitted(rst, omitted, notomitted=None):
+def indicateomitted(
+    rst: List[bytes], omitted: bytes, notomitted: Optional[bytes] = None
+) -> None:
     rst.append(b'\n\n.. container:: omitted\n\n    %s\n\n' % omitted)
     if notomitted:
         rst.append(b'\n\n.. container:: notomitted\n\n    %s\n\n' % notomitted)
 
 
-def filtercmd(ui, cmd, func, kw, doc):
+def filtercmd(ui: uimod.ui, cmd: bytes, func, kw: bytes, doc: bytes) -> bool:
     if not ui.debugflag and cmd.startswith(b"debug") and kw != b"debug":
         # Debug command, and user is not looking for those.
         return True
@@ -249,11 +277,13 @@
     return False
 
 
-def filtertopic(ui, topic):
+def filtertopic(ui: uimod.ui, topic: bytes) -> bool:
     return ui.configbool(b'help', b'hidden-topic.%s' % topic, False)
 
 
-def topicmatch(ui, commands, kw):
+def topicmatch(
+    ui: uimod.ui, commands, kw: bytes
+) -> Dict[bytes, List[Tuple[bytes, bytes]]]:
     """Return help topics matching kw.
 
     Returns {'section': [(name, summary), ...], ...} where section is
@@ -326,10 +356,10 @@
     return results
 
 
-def loaddoc(topic, subdir=None):
+def loaddoc(topic: bytes, subdir: Optional[bytes] = None) -> _DocLoader:
     """Return a delayed loader for help/topic.txt."""
 
-    def loader(ui):
+    def loader(ui: uimod.ui) -> bytes:
         package = b'mercurial.helptext'
         if subdir:
             package += b'.' + subdir
@@ -342,7 +372,7 @@
     return loader
 
 
-internalstable = sorted(
+internalstable: List[_HelpEntryNoCategory] = sorted(
     [
         (
             [b'bid-merge'],
@@ -407,7 +437,7 @@
 )
 
 
-def internalshelp(ui):
+def internalshelp(ui: uimod.ui) -> bytes:
     """Generate the index for the "internals" topic."""
     lines = [
         b'To access a subtopic, use "hg help internals.{subtopic-name}"\n',
@@ -419,7 +449,7 @@
     return b''.join(lines)
 
 
-helptable = sorted(
+helptable: List[_HelpEntryWithCategory] = sorted(
     [
         (
             [b'bundlespec'],
@@ -581,20 +611,27 @@
 )
 
 # Maps topics with sub-topics to a list of their sub-topics.
-subtopics = {
+subtopics: Dict[bytes, List[_HelpEntryNoCategory]] = {
     b'internals': internalstable,
 }
 
 # Map topics to lists of callable taking the current topic help and
 # returning the updated version
-helphooks = {}
+helphooks: Dict[bytes, List[_TopicHook]] = {}
 
 
-def addtopichook(topic, rewriter):
+def addtopichook(topic: bytes, rewriter: _TopicHook) -> None:
     helphooks.setdefault(topic, []).append(rewriter)
 
 
-def makeitemsdoc(ui, topic, doc, marker, items, dedent=False):
+def makeitemsdoc(
+    ui: uimod.ui,
+    topic: bytes,
+    doc: bytes,
+    marker: bytes,
+    items: Dict[bytes, bytes],
+    dedent: bool = False,
+) -> bytes:
     """Extract docstring from the items key to function mapping, build a
     single documentation block and use it to overwrite the marker in doc.
     """
@@ -622,8 +659,10 @@
     return doc.replace(marker, entries)
 
 
-def addtopicsymbols(topic, marker, symbols, dedent=False):
-    def add(ui, topic, doc):
+def addtopicsymbols(
+    topic: bytes, marker: bytes, symbols, dedent: bool = False
+) -> None:
+    def add(ui: uimod.ui, topic: bytes, doc: bytes):
         return makeitemsdoc(ui, topic, doc, marker, symbols, dedent=dedent)
 
     addtopichook(topic, add)
@@ -647,7 +686,7 @@
 )
 
 
-def inserttweakrc(ui, topic, doc):
+def inserttweakrc(ui: uimod.ui, topic: bytes, doc: bytes) -> bytes:
     marker = b'.. tweakdefaultsmarker'
     repl = uimod.tweakrc
 
@@ -658,7 +697,9 @@
     return re.sub(br'( *)%s' % re.escape(marker), sub, doc)
 
 
-def _getcategorizedhelpcmds(ui, cmdtable, name, select=None):
+def _getcategorizedhelpcmds(
+    ui: uimod.ui, cmdtable, name: bytes, select: Optional[_SelectFn] = None
+) -> Tuple[Dict[bytes, List[bytes]], Dict[bytes, bytes], _SynonymTable]:
     # Category -> list of commands
     cats = {}
     # Command -> short description
@@ -687,16 +728,18 @@
     return cats, h, syns
 
 
-def _getcategorizedhelptopics(ui, topictable):
+def _getcategorizedhelptopics(
+    ui: uimod.ui, topictable: List[_HelpEntry]
+) -> Tuple[Dict[bytes, List[Tuple[bytes, bytes]]], Dict[bytes, List[bytes]]]:
     # Group commands by category.
     topiccats = {}
     syns = {}
     for topic in topictable:
         names, header, doc = topic[0:3]
         if len(topic) > 3 and topic[3]:
-            category = topic[3]
+            category: bytes = cast(bytes, topic[3])  # help pytype
         else:
-            category = TOPIC_CATEGORY_NONE
+            category: bytes = TOPIC_CATEGORY_NONE
 
         topicname = names[0]
         syns[topicname] = list(names)
@@ -709,15 +752,15 @@
 
 
 def help_(
-    ui,
+    ui: uimod.ui,
     commands,
-    name,
-    unknowncmd=False,
-    full=True,
-    subtopic=None,
-    fullname=None,
+    name: bytes,
+    unknowncmd: bool = False,
+    full: bool = True,
+    subtopic: Optional[bytes] = None,
+    fullname: Optional[bytes] = None,
     **opts
-):
+) -> bytes:
     """
     Generate the help for 'name' as unformatted restructured text. If
     'name' is None, describe the commands available.
@@ -725,7 +768,7 @@
 
     opts = pycompat.byteskwargs(opts)
 
-    def helpcmd(name, subtopic=None):
+    def helpcmd(name: bytes, subtopic: Optional[bytes]) -> List[bytes]:
         try:
             aliases, entry = cmdutil.findcmd(
                 name, commands.table, strict=unknowncmd
@@ -826,7 +869,7 @@
 
         return rst
 
-    def helplist(select=None, **opts):
+    def helplist(select: Optional[_SelectFn] = None, **opts) -> List[bytes]:
         cats, h, syns = _getcategorizedhelpcmds(
             ui, commands.table, name, select
         )
@@ -846,7 +889,7 @@
             else:
                 rst.append(_(b'list of commands:\n'))
 
-        def appendcmds(cmds):
+        def appendcmds(cmds: Iterable[bytes]) -> None:
             cmds = sorted(cmds)
             for c in cmds:
                 display_cmd = c
@@ -955,7 +998,7 @@
                 )
         return rst
 
-    def helptopic(name, subtopic=None):
+    def helptopic(name: bytes, subtopic: Optional[bytes] = None) -> List[bytes]:
         # Look for sub-topic entry first.
         header, doc = None, None
         if subtopic and name in subtopics:
@@ -998,7 +1041,7 @@
             pass
         return rst
 
-    def helpext(name, subtopic=None):
+    def helpext(name: bytes, subtopic: Optional[bytes] = None) -> List[bytes]:
         try:
             mod = extensions.find(name)
             doc = gettext(pycompat.getdoc(mod)) or _(b'no help text available')
@@ -1040,7 +1083,9 @@
             )
         return rst
 
-    def helpextcmd(name, subtopic=None):
+    def helpextcmd(
+        name: bytes, subtopic: Optional[bytes] = None
+    ) -> List[bytes]:
         cmd, ext, doc = extensions.disabledcmd(
             ui, name, ui.configbool(b'ui', b'strict')
         )
@@ -1127,8 +1172,14 @@
 
 
 def formattedhelp(
-    ui, commands, fullname, keep=None, unknowncmd=False, full=True, **opts
-):
+    ui: uimod.ui,
+    commands,
+    fullname: Optional[bytes],
+    keep: Optional[Iterable[bytes]] = None,
+    unknowncmd: bool = False,
+    full: bool = True,
+    **opts
+) -> bytes:
     """get help for a given topic (as a dotted name) as rendered rst
 
     Either returns the rendered help text or raises an exception.
--- a/mercurial/helptext/config.txt	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/helptext/config.txt	Thu Mar 02 22:45:44 2023 +0100
@@ -1922,6 +1922,42 @@
   - ``ignore``: ignore bookmarks during exchange.
     (This currently only affect pulling)
 
+.. container:: verbose
+
+  ``pulled-delta-reuse-policy``
+  Control the policy regarding deltas sent by the remote during pulls.
+
+  This is an advanced option that non-admin users should not need to understand
+  or set. This option can be used to speed up pulls from trusted central
+  servers, or to fix up deltas from older servers.
+
+  It supports the following values:
+
+  - ``default``: use the policy defined by
+    `storage.revlog.reuse-external-delta-parent`,
+
+  - ``no-reuse``: start a new optimal delta search for each new revision we add
+    to the repository. The deltas from the server will be reused when the base
+    they apply to is tested (this can be frequent if that base is the one and
+    only parent of that revision). This can significantly slow down pulls, but
+    will result in optimized storage space if the remote peer is sending
+    poor-quality deltas.
+
+  - ``try-base``: try to reuse the deltas from the remote peer as long as they
+    create a valid delta-chain in the local repository. This speeds up the
+    unbundling process, but can result in sub-optimal storage space if the
+    remote peer is sending poor-quality deltas.
+
+  - ``forced``: the deltas from the peer will be reused in all cases, even if
+    the resulting delta-chain is "invalid". This setting will ensure the bundle
+    is applied at minimal CPU cost, but it can result in longer delta chains
+    being created on the client, making revisions potentially slower to access
+    in the future. If you think you need this option, you should make sure you
+    are also talking to the Mercurial developer community to get confirmation.
+
+  See `hg help config.storage.revlog.reuse-external-delta-parent` for a similar
+  global option. That option defines the behavior of `default`.
+
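
For instance, a sketch of opting a trusted default path into delta reuse via a path sub-option (the path name and URL are placeholders):

    [paths]
    default = https://hg.example.org/repo
    default:pulled-delta-reuse-policy = try-base
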
 The following special named paths exist:
 
 ``default``
@@ -2281,6 +2317,21 @@
     To fix affected revisions that already exist within the repository, one can
     use :hg:`debug-repair-issue-6528`.
 
+.. container:: verbose
+
+    ``revlog.delta-parent-search.candidate-group-chunk-size``
+        Tune the number of delta bases the storage will consider in the
+        same "round" of search. In some very rare cases, using a smaller value
+        might result in faster processing at the possible expense of storage
+        space, while using larger values might result in slower processing at the
+        possible benefit of storage space. A value of "0" means no limitation.
+
+        default: no limitation
+
+        It is unlikely that you'll have to tune this configuration. If you think
+        you do, consider talking with the Mercurial developer community about your
+        repositories.
+
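
A sketch of what tuning this would look like in an hgrc (the chunk size shown is purely illustrative):

    [storage]
    revlog.delta-parent-search.candidate-group-chunk-size = 16
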
 ``revlog.optimize-delta-parent-choice``
     When storing a merge revision, both parents will be equally considered as
     a possible delta base. This results in better delta selection and improved
--- a/mercurial/helptext/rust.txt	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/helptext/rust.txt	Thu Mar 02 22:45:44 2023 +0100
@@ -76,8 +76,8 @@
 MSRV
 ====
 
-The minimum supported Rust version is currently 1.48.0. The project's policy is
-to follow the version from Debian stable, to make the distributions' job easier.
+The minimum supported Rust version is currently 1.61.0. The project's policy is
+to follow the version from Debian testing, to make the distributions' job easier.
 
 rhg
 ===
--- a/mercurial/hg.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/hg.py	Thu Mar 02 22:45:44 2023 +0100
@@ -65,28 +65,12 @@
 sharedbookmarks = b'bookmarks'
 
 
-def _local(path):
-    path = util.expandpath(urlutil.urllocalpath(path))
-
-    try:
-        # we use os.stat() directly here instead of os.path.isfile()
-        # because the latter started returning `False` on invalid path
-        # exceptions starting in 3.8 and we care about handling
-        # invalid paths specially here.
-        st = os.stat(path)
-        isfile = stat.S_ISREG(st.st_mode)
-    except ValueError as e:
-        raise error.Abort(
-            _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
-        )
-    except OSError:
-        isfile = False
-
-    return isfile and bundlerepo or localrepo
-
-
 def addbranchrevs(lrepo, other, branches, revs):
-    peer = other.peer()  # a courtesy to callers using a localrepo for other
+    if util.safehasattr(other, 'peer'):
+        # a courtesy to callers using a localrepo for other
+        peer = other.peer()
+    else:
+        peer = other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
@@ -129,10 +113,47 @@
     return revs, revs[0]
 
 
-schemes = {
+def _isfile(path):
+    try:
+        # we use os.stat() directly here instead of os.path.isfile()
+        # because the latter started returning `False` on invalid path
+        # exceptions starting in 3.8 and we care about handling
+        # invalid paths specially here.
+        st = os.stat(path)
+    except ValueError as e:
+        msg = stringutil.forcebytestr(e)
+        raise error.Abort(_(b'invalid path %s: %s') % (path, msg))
+    except OSError:
+        return False
+    else:
+        return stat.S_ISREG(st.st_mode)
+
+
+class LocalFactory:
+    """thin wrapper to dispatch between localrepo and bundle repo"""
+
+    @staticmethod
+    def islocal(path: bytes) -> bool:
+        path = util.expandpath(urlutil.urllocalpath(path))
+        return not _isfile(path)
+
+    @staticmethod
+    def instance(ui, path, *args, **kwargs):
+        path = util.expandpath(urlutil.urllocalpath(path))
+        if _isfile(path):
+            cls = bundlerepo
+        else:
+            cls = localrepo
+        return cls.instance(ui, path, *args, **kwargs)
+
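A sketch of the dispatch this factory performs (the paths are placeholders; a directory is assumed to hold a repository, a plain file to be a bundle):

    # directory path -> localrepo.instance(); bundle file -> bundlerepo.instance()
    repo = LocalFactory.instance(ui, b'/srv/myrepo')
    bundle_repo = LocalFactory.instance(ui, b'/tmp/incoming.hg')
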
+
+repo_schemes = {
     b'bundle': bundlerepo,
     b'union': unionrepo,
-    b'file': _local,
+    b'file': LocalFactory,
+}
+
+peer_schemes = {
     b'http': httppeer,
     b'https': httppeer,
     b'ssh': sshpeer,
@@ -140,27 +161,23 @@
 }
 
 
-def _peerlookup(path):
-    u = urlutil.url(path)
-    scheme = u.scheme or b'file'
-    thing = schemes.get(scheme) or schemes[b'file']
-    try:
-        return thing(path)
-    except TypeError:
-        # we can't test callable(thing) because 'thing' can be an unloaded
-        # module that implements __call__
-        if not util.safehasattr(thing, b'instance'):
-            raise
-        return thing
-
-
 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, bytes):
-        try:
-            return _peerlookup(repo).islocal(repo)
-        except AttributeError:
-            return False
+        u = urlutil.url(repo)
+        scheme = u.scheme or b'file'
+        if scheme in peer_schemes:
+            cls = peer_schemes[scheme]
+            cls.make_peer  # make sure we load the module
+        elif scheme in repo_schemes:
+            cls = repo_schemes[scheme]
+            cls.instance  # make sure we load the module
+        else:
+            cls = LocalFactory
+        if util.safehasattr(cls, 'islocal'):
+            return cls.islocal(repo)  # pytype: disable=module-attr
+        return False
+    repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
     return repo.local()
 
 
@@ -177,13 +194,7 @@
 wirepeersetupfuncs = []
 
 
-def _peerorrepo(
-    ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
-):
-    """return a repository object for the specified path"""
-    obj = _peerlookup(path).instance(
-        ui, path, create, intents=intents, createopts=createopts
-    )
+def _setup_repo_or_peer(ui, obj, presetupfuncs=None):
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
@@ -195,14 +206,12 @@
             if hook:
                 with util.timedcm('reposetup %r', name) as stats:
                     hook(ui, obj)
-                ui.log(
-                    b'extension', b'  > reposetup for %s took %s\n', name, stats
-                )
+                msg = b'  > reposetup for %s took %s\n'
+                ui.log(b'extension', msg, name, stats)
     ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
-    return obj
 
 
 def repository(
@@ -214,28 +223,59 @@
     createopts=None,
 ):
     """return a repository object for the specified path"""
-    peer = _peerorrepo(
+    scheme = urlutil.url(path).scheme
+    if scheme is None:
+        scheme = b'file'
+    cls = repo_schemes.get(scheme)
+    if cls is None:
+        if scheme in peer_schemes:
+            raise error.Abort(_(b"repository '%s' is not local") % path)
+        cls = LocalFactory
+    repo = cls.instance(
         ui,
         path,
         create,
-        presetupfuncs=presetupfuncs,
         intents=intents,
         createopts=createopts,
     )
-    repo = peer.local()
-    if not repo:
-        raise error.Abort(
-            _(b"repository '%s' is not local") % (path or peer.url())
-        )
+    _setup_repo_or_peer(ui, repo, presetupfuncs=presetupfuncs)
     return repo.filtered(b'visible')
 
 
 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
     '''return a repository peer for the specified path'''
+    ui = getattr(uiorrepo, 'ui', uiorrepo)
     rui = remoteui(uiorrepo, opts)
-    return _peerorrepo(
-        rui, path, create, intents=intents, createopts=createopts
-    ).peer()
+    if util.safehasattr(path, 'url'):
+        # this is already a urlutil.path object
+        peer_path = path
+    else:
+        peer_path = urlutil.path(ui, None, rawloc=path, validate_path=False)
+    scheme = peer_path.url.scheme  # pytype: disable=attribute-error
+    if scheme in peer_schemes:
+        cls = peer_schemes[scheme]
+        peer = cls.make_peer(
+            rui,
+            peer_path,
+            create,
+            intents=intents,
+            createopts=createopts,
+        )
+        _setup_repo_or_peer(rui, peer)
+    else:
+        # this is a repository
+        repo_path = peer_path.loc  # pytype: disable=attribute-error
+        if not repo_path:
+            repo_path = peer_path.rawloc  # pytype: disable=attribute-error
+        repo = repository(
+            rui,
+            repo_path,
+            create,
+            intents=intents,
+            createopts=createopts,
+        )
+        peer = repo.peer(path=peer_path)
+    return peer
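
A sketch of the two dispatch outcomes (locations are placeholders): an ssh URL resolves through `peer_schemes`, while a filesystem path falls through to `repository()` and is wrapped via `repo.peer()`:

    remote = peer(ui, {}, b'ssh://hg@example.org/repo')  # wire protocol peer
    local = peer(ui, {}, b'/srv/myrepo')                 # localpeer wrapping a repo
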
 
 
 def defaultdest(source):
@@ -290,17 +330,23 @@
 ):
     '''create a shared repository'''
 
-    if not islocal(source):
-        raise error.Abort(_(b'can only share local repositories'))
+    not_local_msg = _(b'can only share local repositories')
+    if util.safehasattr(source, 'local'):
+        if source.local() is None:
+            raise error.Abort(not_local_msg)
+    elif not islocal(source):
+        # XXX why are we getting bytes here?
+        raise error.Abort(not_local_msg)
 
     if not dest:
         dest = defaultdest(source)
     else:
-        dest = urlutil.get_clone_path(ui, dest)[1]
+        dest = urlutil.get_clone_path_obj(ui, dest).loc
 
     if isinstance(source, bytes):
-        origsource, source, branches = urlutil.get_clone_path(ui, source)
-        srcrepo = repository(ui, source)
+        source_path = urlutil.get_clone_path_obj(ui, source)
+        srcrepo = repository(ui, source_path.loc)
+        branches = (source_path.branch, [])
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
@@ -411,7 +457,9 @@
         template = b'[paths]\ndefault = %s\n'
         destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
     if requirements.NARROW_REQUIREMENT in sourcerepo.requirements:
-        with destrepo.wlock():
+        with destrepo.wlock(), destrepo.lock(), destrepo.transaction(
+            b"narrow-share"
+        ):
             narrowspec.copytoworkingcopy(destrepo)
 
 
@@ -661,12 +709,23 @@
     """
 
     if isinstance(source, bytes):
-        src = urlutil.get_clone_path(ui, source, branch)
-        origsource, source, branches = src
-        srcpeer = peer(ui, peeropts, source)
+        src_path = urlutil.get_clone_path_obj(ui, source)
+        if src_path is None:
+            srcpeer = peer(ui, peeropts, b'')
+            origsource = source = b''
+            branches = (None, branch or [])
+        else:
+            srcpeer = peer(ui, peeropts, src_path)
+            origsource = src_path.rawloc
+            branches = (src_path.branch, branch or [])
+            source = src_path.loc
     else:
-        srcpeer = source.peer()  # in case we were called with a localrepo
+        if util.safehasattr(source, 'peer'):
+            srcpeer = source.peer()  # in case we were called with a localrepo
+        else:
+            srcpeer = source
         branches = (None, branch or [])
+        # XXX path: simply use the peer `path` object when this becomes available
         origsource = source = srcpeer.url()
     srclock = destlock = destwlock = cleandir = None
     destpeer = None
@@ -678,7 +737,11 @@
             if dest:
                 ui.status(_(b"destination directory: %s\n") % dest)
         else:
-            dest = urlutil.get_clone_path(ui, dest)[0]
+            dest_path = urlutil.get_clone_path_obj(ui, dest)
+            if dest_path is not None:
+                dest = dest_path.rawloc
+            else:
+                dest = b''
 
         dest = urlutil.urllocalpath(dest)
         source = urlutil.urllocalpath(source)
@@ -925,7 +988,9 @@
             local = destpeer.local()
             if local:
                 if narrow:
-                    with local.wlock(), local.lock():
+                    with local.wlock(), local.lock(), local.transaction(
+                        b'narrow-clone'
+                    ):
                         local.setnarrowpats(storeincludepats, storeexcludepats)
                         narrowspec.copytoworkingcopy(local)
 
@@ -1271,23 +1336,28 @@
         msg %= len(srcs)
         raise error.Abort(msg)
     path = srcs[0]
-    source, branches = urlutil.parseurl(path.rawloc, opts.get(b'branch'))
-    if subpath is not None:
+    if subpath is None:
+        peer_path = path
+        url = path.loc
+    else:
+        # XXX path: we are losing the `path` object here. Keeping it would be
+        # valuable; for example, as a "variant" as we do for pushes.
         subpath = urlutil.url(subpath)
         if subpath.isabs():
-            source = bytes(subpath)
+            peer_path = url = bytes(subpath)
         else:
-            p = urlutil.url(source)
+            p = urlutil.url(path.loc)
             if p.islocal():
                 normpath = os.path.normpath
             else:
                 normpath = posixpath.normpath
             p.path = normpath(b'%s/%s' % (p.path, subpath))
-            source = bytes(p)
-    other = peer(repo, opts, source)
+            peer_path = url = bytes(p)
+    other = peer(repo, opts, peer_path)
     cleanupfn = other.close
     try:
-        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(source))
+        ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(url))
+        branches = (path.branch, opts.get(b'branch', []))
         revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
 
         if revs:
@@ -1346,7 +1416,7 @@
     out = set()
     others = []
     for path in urlutil.get_push_paths(repo, ui, dests):
-        dest = path.pushloc or path.loc
+        dest = path.loc
         if subpath is not None:
             subpath = urlutil.url(subpath)
             if subpath.isabs():
--- a/mercurial/hgweb/hgweb_mod.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/hgweb/hgweb_mod.py	Thu Mar 02 22:45:44 2023 +0100
@@ -230,8 +230,9 @@
 
     def sendtemplate(self, name, **kwargs):
         """Helper function to send a response generated from a template."""
-        kwargs = pycompat.byteskwargs(kwargs)
-        self.res.setbodygen(self.tmpl.generate(name, kwargs))
+        if self.req.method != b'HEAD':
+            kwargs = pycompat.byteskwargs(kwargs)
+            self.res.setbodygen(self.tmpl.generate(name, kwargs))
         return self.res.sendresponse()
 
 
--- a/mercurial/hgweb/request.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/hgweb/request.py	Thu Mar 02 22:45:44 2023 +0100
@@ -485,6 +485,7 @@
             self._bodybytes is None
             and self._bodygen is None
             and not self._bodywillwrite
+            and self._req.method != b'HEAD'
         ):
             raise error.ProgrammingError(b'response body not defined')
 
@@ -594,6 +595,8 @@
                 yield chunk
         elif self._bodywillwrite:
             self._bodywritefn = write
+        elif self._req.method == b'HEAD':
+            pass
         else:
             error.ProgrammingError(b'do not know how to send body')
 
--- a/mercurial/hgweb/server.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/hgweb/server.py	Thu Mar 02 22:45:44 2023 +0100
@@ -151,6 +151,9 @@
     def do_GET(self):
         self.do_POST()
 
+    def do_HEAD(self):
+        self.do_POST()
+
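With `do_HEAD` wired to the same handler, a HEAD request now returns the usual headers with an empty body; a client-side sketch (the URL and port are placeholders for a local `hg serve`):

    import urllib.request

    req = urllib.request.Request('http://localhost:8000/', method='HEAD')
    with urllib.request.urlopen(req) as resp:
        assert resp.read() == b''              # HEAD responses carry no body
        print(resp.headers.get('Content-Type'))
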
     def do_hgweb(self):
         self.sent_headers = False
         path, query = _splitURI(self.path)
@@ -246,7 +249,11 @@
             self.send_header(*h)
             if h[0].lower() == 'content-length':
                 self.length = int(h[1])
-        if self.length is None and saved_status[0] != common.HTTP_NOT_MODIFIED:
+        if (
+            self.length is None
+            and saved_status[0] != common.HTTP_NOT_MODIFIED
+            and self.command != 'HEAD'
+        ):
             self._chunked = (
                 not self.close_connection and self.request_version == 'HTTP/1.1'
             )
--- a/mercurial/hgweb/webcommands.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/hgweb/webcommands.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1299,6 +1299,9 @@
             b'sendresponse() should not emit data if writing later'
         )
 
+    if web.req.method == b'HEAD':
+        return []
+
     bodyfh = web.res.getbodyfile()
 
     archival.archive(
--- a/mercurial/httppeer.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/httppeer.py	Thu Mar 02 22:45:44 2023 +0100
@@ -382,8 +382,7 @@
 
 class httppeer(wireprotov1peer.wirepeer):
     def __init__(self, ui, path, url, opener, requestbuilder, caps):
-        self.ui = ui
-        self._path = path
+        super().__init__(ui, path=path)
         self._url = url
         self._caps = caps
         self.limitedarguments = caps is not None and b'httppostargs' not in caps
@@ -398,14 +397,11 @@
     # Begin of ipeerconnection interface.
 
     def url(self):
-        return self._path
+        return self.path.loc
 
     def local(self):
         return None
 
-    def peer(self):
-        return self
-
     def canpush(self):
         return True
 
@@ -605,14 +601,13 @@
     ``requestbuilder`` is the type used for constructing HTTP requests.
     It exists as an argument so extensions can override the default.
     """
-    u = urlutil.url(path)
-    if u.query or u.fragment:
-        raise error.Abort(
-            _(b'unsupported URL component: "%s"') % (u.query or u.fragment)
-        )
+    if path.url.query or path.url.fragment:
+        msg = _(b'unsupported URL component: "%s"')
+        msg %= path.url.query or path.url.fragment
+        raise error.Abort(msg)
 
     # urllib cannot handle URLs with embedded user or passwd.
-    url, authinfo = u.authinfo()
+    url, authinfo = path.url.authinfo()
     ui.debug(b'using %s\n' % url)
 
     opener = opener or urlmod.opener(ui, authinfo)
@@ -624,11 +619,11 @@
     )
 
 
-def instance(ui, path, create, intents=None, createopts=None):
+def make_peer(ui, path, create, intents=None, createopts=None):
     if create:
         raise error.Abort(_(b'cannot create new http repository'))
     try:
-        if path.startswith(b'https:') and not urlmod.has_https:
+        if path.url.scheme == b'https' and not urlmod.has_https:
             raise error.Abort(
                 _(b'Python support for SSL and HTTPS is not installed')
             )
@@ -638,7 +633,7 @@
         return inst
     except error.RepoError as httpexception:
         try:
-            r = statichttprepo.instance(ui, b"static-" + path, create)
+            r = statichttprepo.make_peer(ui, b"static-" + path.loc, create)
             ui.note(_(b'(falling back to static-http)\n'))
             return r
         except error.RepoError:
--- a/mercurial/interfaces/dirstate.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/interfaces/dirstate.py	Thu Mar 02 22:45:44 2023 +0100
@@ -12,6 +12,7 @@
         sparsematchfn,
         nodeconstants,
         use_dirstate_v2,
+        use_tracked_hint=False,
     ):
         """Create a new dirstate object.
 
@@ -23,6 +24,15 @@
     # TODO: all these private methods and attributes should be made
     # public or removed from the interface.
     _ignore = interfaceutil.Attribute("""Matcher for ignored files.""")
+    is_changing_any = interfaceutil.Attribute(
+        """True if any changes in progress."""
+    )
+    is_changing_parents = interfaceutil.Attribute(
+        """True if parents changes in progress."""
+    )
+    is_changing_files = interfaceutil.Attribute(
+        """True if file tracking changes in progress."""
+    )
 
     def _ignorefiles():
         """Return a list of files containing patterns to ignore."""
@@ -34,7 +44,7 @@
     _checkexec = interfaceutil.Attribute("""Callable for checking exec bits.""")
 
     @contextlib.contextmanager
-    def parentchange():
+    def changing_parents(repo):
         """Context manager for handling dirstate parents.
 
         If an exception occurs in the scope of the context manager,
@@ -42,16 +52,26 @@
         released.
         """
 
-    def pendingparentchange():
-        """Returns true if the dirstate is in the middle of a set of changes
-        that modify the dirstate parent.
+    @contextlib.contextmanager
+    def changing_files(repo):
+        """Context manager for handling dirstate files.
+
+        If an exception occurs in the scope of the context manager,
+        the incoherent dirstate won't be written when wlock is
+        released.
         """
 
     def hasdir(d):
         pass
 
     def flagfunc(buildfallback):
-        pass
+        """build a callable that returns flags associated with a filename
+
+        The information is extracted from three possible layers:
+        1. the file system if it supports the information
+        2. the "fallback" information stored in the dirstate if any
+        3. a more expensive mechanism inferring the flags from the parents.
+        """
 
     def getcwd():
         """Return the path from which a canonical path is calculated.
@@ -61,12 +81,12 @@
         used to get real file paths. Use vfs functions instead.
         """
 
+    def pathto(f, cwd=None):
+        pass
+
     def get_entry(path):
         """return a DirstateItem for the associated path"""
 
-    def pathto(f, cwd=None):
-        pass
-
     def __contains__(key):
         """Check if bytestring `key` is known to the dirstate."""
 
@@ -96,14 +116,14 @@
     def setparents(p1, p2=None):
         """Set dirstate parents to p1 and p2.
 
-        When moving from two parents to one, 'm' merged entries a
+        When moving from two parents to one, "merged" entries a
         adjusted to normal and previous copy records discarded and
         returned by the call.
 
         See localrepo.setparents()
         """
 
-    def setbranch(branch):
+    def setbranch(branch, transaction=None):
         pass
 
     def invalidate():
@@ -146,13 +166,6 @@
     def rebuild(parent, allfiles, changedfiles=None):
         pass
 
-    def identity():
-        """Return identity of dirstate it to detect changing in storage
-
-        If identity of previous dirstate is equal to this, writing
-        changes based on the former dirstate out can keep consistency.
-        """
-
     def write(tr):
         pass
 
@@ -200,11 +213,7 @@
         return files in the dirstate (in whatever state) filtered by match
         """
 
-    def savebackup(tr, backupname):
-        '''Save current dirstate into backup file'''
-
-    def restorebackup(tr, backupname):
-        '''Restore dirstate by backup file'''
-
-    def clearbackup(tr, backupname):
-        '''Clear backup file'''
+    def verify(m1, m2, p1, narrow_matcher=None):
+        """
+        check the dirstate contents against the parent manifest and yield errors
+        """
--- a/mercurial/interfaces/repository.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/interfaces/repository.py	Thu Mar 02 22:45:44 2023 +0100
@@ -103,6 +103,7 @@
     """
 
     ui = interfaceutil.Attribute("""ui.ui instance""")
+    path = interfaceutil.Attribute("""a urlutil.path instance or None""")
 
     def url():
         """Returns a URL string representing this peer.
@@ -123,12 +124,6 @@
         can be used to interface with it. Otherwise returns ``None``.
         """
 
-    def peer():
-        """Returns an object conforming to this interface.
-
-        Most implementations will ``return self``.
-        """
-
     def canpush():
         """Returns a boolean indicating if this peer can be pushed to."""
 
@@ -393,6 +388,10 @@
 
     limitedarguments = False
 
+    def __init__(self, ui, path=None):
+        self.ui = ui
+        self.path = path
+
     def capable(self, name):
         caps = self.capabilities()
         if name in caps:
@@ -1613,7 +1612,7 @@
     def close():
         """Close the handle on this repository."""
 
-    def peer():
+    def peer(path=None):
         """Obtain an object conforming to the ``peer`` interface."""
 
     def unfiltered():
--- a/mercurial/localrepo.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/localrepo.py	Thu Mar 02 22:45:44 2023 +0100
@@ -10,11 +10,16 @@
 import functools
 import os
 import random
+import re
 import sys
 import time
 import weakref
 
 from concurrent import futures
+from typing import (
+    Optional,
+)
+
 from .i18n import _
 from .node import (
     bin,
@@ -37,7 +42,6 @@
     commit,
     context,
     dirstate,
-    dirstateguard,
     discovery,
     encoding,
     error,
@@ -96,6 +100,10 @@
 urlerr = util.urlerr
 urlreq = util.urlreq
 
+RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
+    b"^((dirstate|narrowspec.dirstate).*|branch$)"
+)
+
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain for vfs relative paths
 # - '' for svfs relative paths
@@ -299,13 +307,12 @@
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
 
-    def __init__(self, repo, caps=None):
-        super(localpeer, self).__init__()
+    def __init__(self, repo, caps=None, path=None):
+        super(localpeer, self).__init__(repo.ui, path=path)
 
         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')
-        self.ui = repo.ui
 
         if repo._wanted_sidedata:
             formatted = bundle2.format_remote_wanted_sidedata(repo)
@@ -321,9 +328,6 @@
     def local(self):
         return self._repo
 
-    def peer(self):
-        return self
-
     def canpush(self):
         return True
 
@@ -451,8 +455,8 @@
     """peer extension which implements legacy methods too; used for tests with
     restricted capabilities"""
 
-    def __init__(self, repo):
-        super(locallegacypeer, self).__init__(repo, caps=legacycaps)
+    def __init__(self, repo, path=None):
+        super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)
 
     # Begin of baselegacywirecommands interface.
 
@@ -526,7 +530,7 @@
     return set(read(b'requires').splitlines())
 
 
-def makelocalrepository(baseui, path, intents=None):
+def makelocalrepository(baseui, path: bytes, intents=None):
     """Create a local repository object.
 
     Given arguments needed to construct a local repository, this function
@@ -612,7 +616,6 @@
     # to be reshared
     hint = _(b"see `hg help config.format.use-share-safe` for more information")
     if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
-
         if (
             shared
             and requirementsmod.SHARESAFE_REQUIREMENT
@@ -845,7 +848,13 @@
     )
 
 
-def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
+def loadhgrc(
+    ui,
+    wdirvfs: vfsmod.vfs,
+    hgvfs: vfsmod.vfs,
+    requirements,
+    sharedvfs: Optional[vfsmod.vfs] = None,
+):
     """Load hgrc files/content into a ui instance.
 
     This is called during repository opening to load any additional
@@ -1058,6 +1067,8 @@
         options[b'revlogv2'] = True
     if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
         options[b'changelogv2'] = True
+        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
+        options[b'changelogv2.compute-rank'] = cmp_rank
 
     if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
         options[b'generaldelta'] = True
@@ -1071,6 +1082,11 @@
         b'storage', b'revlog.optimize-delta-parent-choice'
     )
     options[b'deltabothparents'] = deltabothparents
+    dps_cgds = ui.configint(
+        b'storage',
+        b'revlog.delta-parent-search.candidate-group-chunk-size',
+    )
+    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
     options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
 
     issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
@@ -1311,8 +1327,6 @@
         # XXX cache is a complicatged business someone
         # should investigate this in depth at some point
         b'cache/',
-        # XXX shouldn't be dirstate covered by the wlock?
-        b'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remainig bit and drop this line
@@ -1323,15 +1337,15 @@
         self,
         baseui,
         ui,
-        origroot,
-        wdirvfs,
-        hgvfs,
+        origroot: bytes,
+        wdirvfs: vfsmod.vfs,
+        hgvfs: vfsmod.vfs,
         requirements,
         supportedrequirements,
-        sharedpath,
+        sharedpath: bytes,
         store,
-        cachevfs,
-        wcachevfs,
+        cachevfs: vfsmod.vfs,
+        wcachevfs: vfsmod.vfs,
         features,
         intents=None,
     ):
@@ -1453,9 +1467,13 @@
         # - bookmark changes
         self.filteredrevcache = {}
 
+        self._dirstate = None
         # post-dirstate-status hooks
         self._postdsstatus = []
 
+        self._pending_narrow_pats = None
+        self._pending_narrow_pats_dirstate = None
+
         # generic mapping between names and nodes
         self.names = namespaces.namespaces()
 
@@ -1620,8 +1638,8 @@
                 parts.pop()
         return False
 
-    def peer(self):
-        return localpeer(self)  # not cached to avoid reference cycle
+    def peer(self, path=None):
+        return localpeer(self, path=path)  # not cached to avoid reference cycle
 
     def unfiltered(self):
         """Return unfiltered version of the repository
@@ -1738,9 +1756,13 @@
     def manifestlog(self):
         return self.store.manifestlog(self, self._storenarrowmatch)
 
-    @repofilecache(b'dirstate')
+    @unfilteredpropertycache
     def dirstate(self):
-        return self._makedirstate()
+        if self._dirstate is None:
+            self._dirstate = self._makedirstate()
+        else:
+            self._dirstate.refresh()
+        return self._dirstate
 
     def _makedirstate(self):
         """Extension point for wrapping the dirstate per-repo."""
@@ -1782,7 +1804,11 @@
 
         A tuple of (includes, excludes).
         """
-        return narrowspec.load(self)
+        # the narrow management should probably move into its own object
+        val = self._pending_narrow_pats
+        if val is None:
+            val = narrowspec.load(self)
+        return val
 
     @storecache(narrowspec.FILENAME)
     def _storenarrowmatch(self):
@@ -1977,7 +2003,7 @@
     def __iter__(self):
         return iter(self.changelog)
 
-    def revs(self, expr, *args):
+    def revs(self, expr: bytes, *args):
         """Find revisions matching a revset.
 
         The revset is specified as a string ``expr`` that may contain
@@ -1993,7 +2019,7 @@
         tree = revsetlang.spectree(expr, *args)
         return revset.makematcher(tree)(self)
 
-    def set(self, expr, *args):
+    def set(self, expr: bytes, *args):
         """Find revisions matching a revset and emit changectx instances.
 
         This is a convenience wrapper around ``revs()`` that iterates the
@@ -2005,7 +2031,7 @@
         for r in self.revs(expr, *args):
             yield self[r]
 
-    def anyrevs(self, specs, user=False, localalias=None):
+    def anyrevs(self, specs: bytes, user=False, localalias=None):
         """Find revisions matching one of the given revsets.
 
         Revset aliases from the configuration are not expanded by default. To
@@ -2030,7 +2056,7 @@
             m = revset.matchany(None, specs, localalias=localalias)
         return m(self)
 
-    def url(self):
+    def url(self) -> bytes:
         return b'file:' + self.root
 
     def hook(self, name, throw=False, **args):
@@ -2108,7 +2134,7 @@
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
-        for (name, (node, hist)) in alltags.items():
+        for name, (node, hist) in alltags.items():
             if node != self.nullid:
                 tags[encoding.tolocal(name)] = node
         tags[b'tip'] = self.changelog.tip()
@@ -2229,7 +2255,7 @@
             return b'store'
         return None
 
-    def wjoin(self, f, *insidef):
+    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
         return self.vfs.reljoin(self.root, f, *insidef)
 
     def setparents(self, p1, p2=None):
@@ -2238,17 +2264,17 @@
         self[None].setparents(p1, p2)
         self._quick_access_changeid_invalidate()
 
-    def filectx(self, path, changeid=None, fileid=None, changectx=None):
+    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
         """changeid must be a changeset revision, if specified.
         fileid can be a file revision or node."""
         return context.filectx(
             self, path, changeid, fileid, changectx=changectx
         )
 
-    def getcwd(self):
+    def getcwd(self) -> bytes:
         return self.dirstate.getcwd()
 
-    def pathto(self, f, cwd=None):
+    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
         return self.dirstate.pathto(f, cwd)
 
     def _loadfilter(self, filter):
@@ -2300,14 +2326,21 @@
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
-    def wread(self, filename):
+    def wread(self, filename: bytes) -> bytes:
         if self.wvfs.islink(filename):
             data = self.wvfs.readlink(filename)
         else:
             data = self.wvfs.read(filename)
         return self._filter(self._encodefilterpats, filename, data)
 
-    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
+    def wwrite(
+        self,
+        filename: bytes,
+        data: bytes,
+        flags: bytes,
+        backgroundclose=False,
+        **kwargs
+    ) -> int:
         """write ``data`` into ``filename`` in the working directory
 
         This returns length of written (maybe decoded) data.
@@ -2325,7 +2358,7 @@
                 self.wvfs.setflags(filename, False, False)
         return len(data)
 
-    def wwritedata(self, filename, data):
+    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
         return self._filter(self._decodefilterpats, filename, data)
 
     def currenttransaction(self):
@@ -2356,6 +2389,21 @@
                 hint=_(b"run 'hg recover' to clean up transaction"),
             )
 
+        # At that point your dirstate should be clean:
+        #
+        # - If you don't have the wlock, why would you still have a dirty
+        #   dirstate?
+        #
+        # - If you hold the wlock, you should not be opening a transaction in
+        #   the middle of a `dirstate.changing_*` block. The transaction needs to
+        #   be open before that and wrap the change-context.
+        #
+        # - If you are not within a `dirstate.changing_*` context, why is our
+        #   dirstate dirty?
+        if self.dirstate._dirty:
+            m = "cannot open a transaction with a dirty dirstate"
+            raise error.ProgrammingError(m)
+
         idbase = b"%.40f#%f" % (random.random(), time.time())
         ha = hex(hashutil.sha1(idbase).digest())
         txnid = b'TXN:' + ha
@@ -2512,10 +2560,6 @@
             else:
                 # discard all changes (including ones already written
                 # out) in this transaction
-                narrowspec.restorebackup(self, b'journal.narrowspec')
-                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
-                repo.dirstate.restorebackup(None, b'journal.dirstate')
-
                 repo.invalidate(clearfilecache=True)
 
         tr = transaction.transaction(
@@ -2612,44 +2656,49 @@
         tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
         self._transref = weakref.ref(tr)
         scmutil.registersummarycallback(self, tr, desc)
+        # This only exists to deal with rollback's need to have viable
+        # parents at the end of the operation. So back up viable parents at the
+        # time of this operation.
+        #
+        # We only do it when the `wlock` is taken, otherwise others might be
+        # altering the dirstate under us.
+        #
+        # This is really not a great way to do this (first, because we cannot
+        # always do it). More viable alternatives exist:
+        #
+        # - backing up only the working copy parents in a dedicated file and
+        #   doing a clean "keep-update" to them on `hg rollback`.
+        #
+        # - slightly changing the behavior and applying logic similar to "hg
+        #   strip" to pick a working copy destination on `hg rollback`
+        if self.currentwlock() is not None:
+            ds = self.dirstate
+            if not self.vfs.exists(b'branch'):
+                # force a file to be written if none exists
+                ds.setbranch(b'default', None)
+
+            def backup_dirstate(tr):
+                for f in ds.all_file_names():
+                    # hardlink backup is okay because `dirstate` is always
+                    # atomically written and possible data files are append-only
+                    # and resistant to trailing data.
+                    tr.addbackup(f, hardlink=True, location=b'plain')
+
+            tr.addvalidator(b'dirstate-backup', backup_dirstate)
         return tr
 
     def _journalfiles(self):
-        first = (
+        return (
             (self.svfs, b'journal'),
-            (self.svfs, b'journal.narrowspec'),
-            (self.vfs, b'journal.narrowspec.dirstate'),
-            (self.vfs, b'journal.dirstate'),
+            (self.vfs, b'journal.desc'),
         )
-        middle = []
-        dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
-        if dirstate_data is not None:
-            middle.append((self.vfs, dirstate_data))
-        end = (
-            (self.vfs, b'journal.branch'),
-            (self.vfs, b'journal.desc'),
-            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
-            (self.svfs, b'journal.phaseroots'),
-        )
-        return first + tuple(middle) + end
 
     def undofiles(self):
         return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
 
     @unfilteredmethod
     def _writejournal(self, desc):
-        self.dirstate.savebackup(None, b'journal.dirstate')
-        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
-        narrowspec.savebackup(self, b'journal.narrowspec')
-        self.vfs.write(
-            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
-        )
         self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
-        bookmarksvfs = bookmarks.bookmarksvfs(self)
-        bookmarksvfs.write(
-            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
-        )
-        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
 
     def recover(self):
         with self.lock():
@@ -2673,23 +2722,23 @@
                 return False
 
     def rollback(self, dryrun=False, force=False):
-        wlock = lock = dsguard = None
+        wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if self.svfs.exists(b"undo"):
-                dsguard = dirstateguard.dirstateguard(self, b'rollback')
-
-                return self._rollback(dryrun, force, dsguard)
+                return self._rollback(dryrun, force)
             else:
                 self.ui.warn(_(b"no rollback information available\n"))
                 return 1
         finally:
-            release(dsguard, lock, wlock)
+            release(lock, wlock)
 
     @unfilteredmethod  # Until we get smarter cache management
-    def _rollback(self, dryrun, force, dsguard):
+    def _rollback(self, dryrun, force):
         ui = self.ui
+
+        parents = self.dirstate.parents()
         try:
             args = self.vfs.read(b'undo.desc').splitlines()
             (oldlen, desc, detail) = (int(args[0]), args[1], None)
@@ -2706,9 +2755,11 @@
                 msg = _(
                     b'repository tip rolled back to revision %d (undo %s)\n'
                 ) % (oldtip, desc)
+            parentgone = any(self[p].rev() > oldtip for p in parents)
         except IOError:
             msg = _(b'rolling back unknown transaction\n')
             desc = None
+            parentgone = True
 
         if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
             raise error.Abort(
@@ -2723,41 +2774,31 @@
         if dryrun:
             return 0
 
-        parents = self.dirstate.parents()
         self.destroying()
         vfsmap = {b'plain': self.vfs, b'': self.svfs}
+        skip_journal_pattern = None
+        if not parentgone:
+            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
         transaction.rollback(
-            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
+            self.svfs,
+            vfsmap,
+            b'undo',
+            ui.warn,
+            checkambigfiles=_cachedfiles,
+            skip_journal_pattern=skip_journal_pattern,
         )
-        bookmarksvfs = bookmarks.bookmarksvfs(self)
-        if bookmarksvfs.exists(b'undo.bookmarks'):
-            bookmarksvfs.rename(
-                b'undo.bookmarks', b'bookmarks', checkambig=True
-            )
-        if self.svfs.exists(b'undo.phaseroots'):
-            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
         self.invalidate()
-
-        has_node = self.changelog.index.has_node
-        parentgone = any(not has_node(p) for p in parents)
+        self.dirstate.invalidate()
+
         if parentgone:
-            # prevent dirstateguard from overwriting already restored one
-            dsguard.close()
-
-            narrowspec.restorebackup(self, b'undo.narrowspec')
-            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
-            self.dirstate.restorebackup(None, b'undo.dirstate')
-            try:
-                branch = self.vfs.read(b'undo.branch')
-                self.dirstate.setbranch(encoding.tolocal(branch))
-            except IOError:
-                ui.warn(
-                    _(
-                        b'named branch could not be reset: '
-                        b'current branch is still \'%s\'\n'
-                    )
-                    % self.dirstate.branch()
-                )
+            # replace this with some explicit parent update in the future.
+            has_node = self.changelog.index.has_node
+            if not all(has_node(p) for p in self.dirstate._pl):
+                # There was no dirstate to backup initially, we need to drop
+                # the existing one.
+                with self.dirstate.changing_parents(self):
+                    self.dirstate.setparents(self.nullid)
+                    self.dirstate.clear()
 
             parents = tuple([p.rev() for p in self[None].parents()])
             if len(parents) > 1:
@@ -2880,7 +2921,6 @@
                 filtered.branchmap().write(filtered)
 
     def invalidatecaches(self):
-
         if '_tagscache' in vars(self):
             # can't use delattr on proxy
             del self.__dict__['_tagscache']
@@ -2903,13 +2943,9 @@
         rereads the dirstate. Use dirstate.invalidate() if you want to
         explicitly read the dirstate again (i.e. restoring it to a previous
         known good state)."""
-        if hasunfilteredcache(self, 'dirstate'):
-            for k in self.dirstate._filecache:
-                try:
-                    delattr(self.dirstate, k)
-                except AttributeError:
-                    pass
-            delattr(self.unfiltered(), 'dirstate')
+        unfi = self.unfiltered()
+        if 'dirstate' in unfi.__dict__:
+            del unfi.__dict__['dirstate']
 
     def invalidate(self, clearfilecache=False):
         """Invalidates both store and non-store parts other than dirstate
@@ -2921,9 +2957,6 @@
         """
         unfiltered = self.unfiltered()  # all file caches are stored unfiltered
         for k in list(self._filecache.keys()):
-            # dirstate is invalidated separately in invalidatedirstate()
-            if k == b'dirstate':
-                continue
             if (
                 k == b'changelog'
                 and self.currenttransaction()
@@ -3052,12 +3085,19 @@
                 self.ui.develwarn(b'"wlock" acquired after "lock"')
 
         def unlock():
-            if self.dirstate.pendingparentchange():
+            if self.dirstate.is_changing_any:
+                msg = b"wlock release in the middle of a changing parents"
+                self.ui.develwarn(msg)
                 self.dirstate.invalidate()
             else:
+                if self.dirstate._dirty:
+                    msg = b"dirty dirstate on wlock release"
+                    self.ui.develwarn(msg)
                 self.dirstate.write(None)
 
-            self._filecache[b'dirstate'].refresh()
+            unfi = self.unfiltered()
+            if 'dirstate' in unfi.__dict__:
+                del unfi.__dict__['dirstate']
 
         l = self._lock(
             self.vfs,
@@ -3520,14 +3560,13 @@
     return a
 
 
-def undoname(fn):
+def undoname(fn: bytes) -> bytes:
     base, name = os.path.split(fn)
     assert name.startswith(b'journal')
     return os.path.join(base, name.replace(b'journal', b'undo', 1))
 
 
-def instance(ui, path, create, intents=None, createopts=None):
-
+def instance(ui, path: bytes, create, intents=None, createopts=None):
     # prevent cyclic import localrepo -> upgrade -> localrepo
     from . import upgrade
 
@@ -3543,7 +3582,7 @@
     return repo
 
 
-def islocal(path):
+def islocal(path: bytes) -> bool:
     return True
 
 
@@ -3803,7 +3842,7 @@
     return {k: v for k, v in createopts.items() if k not in known}
 
 
-def createrepository(ui, path, createopts=None, requirements=None):
+def createrepository(ui, path: bytes, createopts=None, requirements=None):
     """Create a new repository in a vfs.
 
     ``path`` path to the new repo's working directory.
--- a/mercurial/logexchange.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/logexchange.py	Thu Mar 02 22:45:44 2023 +0100
@@ -113,7 +113,7 @@
     if local:
         rpath = util.pconvert(remote._repo.root)
     elif not isinstance(remote, bytes):
-        rpath = remote._url
+        rpath = remote.url()
 
     # represent the remotepath with user defined path name if exists
     for path, url in repo.ui.configitems(b'paths'):
--- a/mercurial/manifest.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/manifest.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1836,6 +1836,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         return self._revlog.emitrevisions(
             nodes,
@@ -1844,6 +1845,7 @@
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
     def addgroup(
@@ -1854,6 +1856,8 @@
         alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
+        debug_info=None,
+        delta_base_reuse_policy=None,
     ):
         return self._revlog.addgroup(
             deltas,
@@ -1862,6 +1866,8 @@
             alwayscache=alwayscache,
             addrevisioncb=addrevisioncb,
             duplicaterevisioncb=duplicaterevisioncb,
+            debug_info=debug_info,
+            delta_base_reuse_policy=delta_base_reuse_policy,
         )
 
     def rawsize(self, rev):
--- a/mercurial/match.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/match.py	Thu Mar 02 22:45:44 2023 +0100
@@ -368,7 +368,7 @@
                     % (
                         pat,
                         inst.message,
-                    )  # pytype: disable=unsupported-operands
+                    )
                 )
             except IOError as inst:
                 if warn:
--- a/mercurial/mdiff.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/mdiff.py	Thu Mar 02 22:45:44 2023 +0100
@@ -94,6 +94,13 @@
         opts.update(kwargs)
         return diffopts(**opts)
 
+    def __bytes__(self):
+        return b", ".join(
+            b"%s: %r" % (k, getattr(self, k)) for k in self.defaults
+        )
+
+    __str__ = encoding.strmethod(__bytes__)
+
 
 defaultopts = diffopts()
 
--- a/mercurial/merge.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/merge.py	Thu Mar 02 22:45:44 2023 +0100
@@ -46,7 +46,7 @@
     return config
 
 
-def _checkunknownfile(repo, wctx, mctx, f, f2=None):
+def _checkunknownfile(dirstate, wvfs, dircache, wctx, mctx, f, f2=None):
     if wctx.isinmemory():
         # Nothing to do in IMM because nothing in the "working copy" can be an
         # unknown file.
@@ -58,9 +58,8 @@
     if f2 is None:
         f2 = f
     return (
-        repo.wvfs.audit.check(f)
-        and repo.wvfs.isfileorlink(f)
-        and repo.dirstate.normalize(f) not in repo.dirstate
+        wvfs.isfileorlink_checkdir(dircache, f)
+        and dirstate.normalize(f) not in dirstate
         and mctx[f2].cmp(wctx[f])
     )
 
@@ -136,6 +135,9 @@
     pathconfig = repo.ui.configbool(
         b'experimental', b'merge.checkpathconflicts'
     )
+    dircache = dict()
+    dirstate = repo.dirstate
+    wvfs = repo.wvfs
     if not force:
 
         def collectconflicts(conflicts, config):
@@ -151,7 +153,7 @@
                 mergestatemod.ACTION_DELETED_CHANGED,
             )
         ):
-            if _checkunknownfile(repo, wctx, mctx, f):
+            if _checkunknownfile(dirstate, wvfs, dircache, wctx, mctx, f):
                 fileconflicts.add(f)
             elif pathconfig and f not in wctx:
                 path = checkunknowndirs(repo, wctx, f)
@@ -160,7 +162,9 @@
         for f, args, msg in mresult.getactions(
             [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
         ):
-            if _checkunknownfile(repo, wctx, mctx, f, args[0]):
+            if _checkunknownfile(
+                dirstate, wvfs, dircache, wctx, mctx, f, args[0]
+            ):
                 fileconflicts.add(f)
 
         allconflicts = fileconflicts | pathconflicts
@@ -173,7 +177,9 @@
             mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
         ):
             fl2, anc = args
-            different = _checkunknownfile(repo, wctx, mctx, f)
+            different = _checkunknownfile(
+                dirstate, wvfs, dircache, wctx, mctx, f
+            )
             if repo.dirstate._ignore(f):
                 config = ignoredconfig
             else:
@@ -240,16 +246,21 @@
         else:
             repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
 
-    for f, args, msg in list(
-        mresult.getactions([mergestatemod.ACTION_CREATED])
-    ):
+    def transformargs(f, args):
         backup = (
             f in fileconflicts
-            or f in pathconflicts
-            or any(p in pathconflicts for p in pathutil.finddirs(f))
+            or pathconflicts
+            and (
+                f in pathconflicts
+                or any(p in pathconflicts for p in pathutil.finddirs(f))
+            )
         )
         (flags,) = args
-        mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
+        return (flags, backup)
+
+    mresult.mapaction(
+        mergestatemod.ACTION_CREATED, mergestatemod.ACTION_GET, transformargs
+    )
 
 
 def _forgetremoved(wctx, mctx, branchmerge, mresult):
@@ -581,6 +592,18 @@
         self._filemapping[filename] = (action, data, message)
         self._actionmapping[action][filename] = (data, message)
 
+    def mapaction(self, actionfrom, actionto, transform):
+        """changes all occurrences of action `actionfrom` into `actionto`,
+        transforming its args with the function `transform`.
+        """
+        orig = self._actionmapping[actionfrom]
+        del self._actionmapping[actionfrom]
+        dest = self._actionmapping[actionto]
+        for f, (data, msg) in orig.items():
+            data = transform(f, data)
+            self._filemapping[f] = (actionto, data, msg)
+            dest[f] = (data, msg)
+
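# A minimal sketch of the `mapaction` contract above, against a toy
# mergeresult-like holder. The names below are illustrative only, not part
# of the Mercurial API.
class _toymergeresult:
    def __init__(self):
        self._filemapping = {}
        self._actionmapping = {b'created': {}, b'get': {}}

    def addfile(self, f, action, data, msg):
        self._filemapping[f] = (action, data, msg)
        self._actionmapping[action][f] = (data, msg)

    def mapaction(self, actionfrom, actionto, transform):
        orig = self._actionmapping.pop(actionfrom)
        self._actionmapping[actionfrom] = {}
        dest = self._actionmapping[actionto]
        for f, (data, msg) in orig.items():
            data = transform(f, data)
            self._filemapping[f] = (actionto, data, msg)
            dest[f] = (data, msg)


# every ACTION_CREATED entry becomes an ACTION_GET entry whose args gained a
# trailing "backup" flag, mirroring transformargs() above
mres = _toymergeresult()
mres.addfile(b'a.txt', b'created', (b'flags',), b'new file')
mres.mapaction(b'created', b'get', lambda f, data: data + (False,))
assert mres._filemapping[b'a.txt'] == (b'get', (b'flags', False), b'new file')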
     def getfile(self, filename, default_return=None):
         """returns (action, args, msg) about this file
 
@@ -1142,6 +1165,8 @@
             followcopies,
         )
         _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
+        if repo.ui.configbool(b'devel', b'debug.abort-update'):
+            exit(1)
 
     else:  # only when merge.preferancestor=* - the default
         repo.ui.note(
@@ -2130,7 +2155,7 @@
             assert len(getfiledata) == (
                 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
             )
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 ### Filter Filedata
                 #
                 # We gathered "cache" information for the clean file while
@@ -2204,7 +2229,9 @@
                 util.unlink(repo.vfs.join(b'updatestate'))
 
                 if not branchmerge:
-                    repo.dirstate.setbranch(p2.branch())
+                    repo.dirstate.setbranch(
+                        p2.branch(), repo.currenttransaction()
+                    )
 
                 # If we're updating to a location, clean up any stale temporary includes
                 # (ex: this happens during hg rebase --abort).
@@ -2352,7 +2379,7 @@
         # fix up dirstate for copies and renames
         copies.graftcopies(wctx, ctx, base)
     else:
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             repo.setparents(pctx.node(), pother)
             repo.dirstate.write(repo.currenttransaction())
             # fix up dirstate for copies and renames
--- a/mercurial/narrowspec.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/narrowspec.py	Thu Mar 02 22:45:44 2023 +0100
@@ -5,6 +5,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
+import weakref
 
 from .i18n import _
 from .pycompat import getattr
@@ -13,9 +14,9 @@
     match as matchmod,
     merge,
     mergestate as mergestatemod,
-    requirements,
     scmutil,
     sparse,
+    txnutil,
     util,
 )
 
@@ -169,60 +170,84 @@
 def load(repo):
     # Treat "narrowspec does not exist" the same as "narrowspec file exists
     # and is empty".
-    spec = repo.svfs.tryread(FILENAME)
+    spec = None
+    if txnutil.mayhavepending(repo.root):
+        pending_path = b"%s.pending" % FILENAME
+        if repo.svfs.exists(pending_path):
+            spec = repo.svfs.tryread(pending_path)
+    if spec is None:
+        spec = repo.svfs.tryread(FILENAME)
     return parseconfig(repo.ui, spec)
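
# A hedged sketch of the ".pending" read pattern used by load() above: while
# a transaction is open, hooks may observe a "<file>.pending" variant holding
# the not-yet-committed content, and readers prefer it when present. The
# helper below is illustrative, not part of the Mercurial API.
import os


def _read_with_pending(base_dir, name, may_have_pending):
    pending = os.path.join(base_dir, name + '.pending')
    if may_have_pending and os.path.exists(pending):
        with open(pending, 'rb') as f:
            return f.read()  # prefer the in-transaction view
    try:
        with open(os.path.join(base_dir, name), 'rb') as f:
            return f.read()
    except FileNotFoundError:
        return b''  # a missing file is treated as an empty spec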
 
 
 def save(repo, includepats, excludepats):
+    repo = repo.unfiltered()
+
     validatepatterns(includepats)
     validatepatterns(excludepats)
     spec = format(includepats, excludepats)
-    repo.svfs.write(FILENAME, spec)
+
+    tr = repo.currenttransaction()
+    if tr is None:
+        m = "changing narrow spec outside of a transaction"
+        raise error.ProgrammingError(m)
+    else:
+        # the round-trip is sometimes different,
+        # so we are not taking any chances for now
+        value = parseconfig(repo.ui, spec)
+        reporef = weakref.ref(repo)
+
+        def clean_pending(tr):
+            r = reporef()
+            if r is not None:
+                r._pending_narrow_pats = None
+
+        tr.addpostclose(b'narrow-spec', clean_pending)
+        tr.addabort(b'narrow-spec', clean_pending)
+        repo._pending_narrow_pats = value
+
+        def write_spec(f):
+            f.write(spec)
+
+        tr.addfilegenerator(
+            # XXX think about order at some point
+            b"narrow-spec",
+            (FILENAME,),
+            write_spec,
+            location=b'store',
+        )
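
# An illustrative reduction of the transaction pattern save() now relies on:
# instead of writing the narrowspec immediately, a "file generator" is
# registered and the content is only materialized when the transaction
# finalizes (or exposed as a ".pending" file for hooks). The tiny class below
# is a stand-in under those assumptions, not Mercurial's real transaction.
class _toytransaction:
    def __init__(self):
        self._filegenerators = {}

    def addfilegenerator(self, genid, filenames, genfunc, location=b'store'):
        self._filegenerators[genid] = (filenames, genfunc, location)

    def close(self, open_for_write):
        # on success, run every generator against a freshly opened file
        for filenames, genfunc, location in self._filegenerators.values():
            for name in filenames:
                with open_for_write(location, name) as f:
                    genfunc(f)


tr = _toytransaction()
spec = b'[include]\npath:foo\n'
tr.addfilegenerator(b'narrow-spec', (b'narrowspec',), lambda f: f.write(spec))
# nothing is written until tr.close(...) runs with a real file opener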
 
 
 def copytoworkingcopy(repo):
-    spec = repo.svfs.read(FILENAME)
-    repo.vfs.write(DIRSTATE_FILENAME, spec)
-
+    repo = repo.unfiltered()
+    tr = repo.currenttransaction()
+    spec = format(*repo.narrowpats)
+    if tr is None:
+        m = "changing narrow spec outside of a transaction"
+        raise error.ProgrammingError(m)
+    else:
 
-def savebackup(repo, backupname):
-    if requirements.NARROW_REQUIREMENT not in repo.requirements:
-        return
-    svfs = repo.svfs
-    svfs.tryunlink(backupname)
-    util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
+        reporef = weakref.ref(repo)
 
-
-def restorebackup(repo, backupname):
-    if requirements.NARROW_REQUIREMENT not in repo.requirements:
-        return
-    util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
-
+        def clean_pending(tr):
+            r = reporef()
+            if r is not None:
+                r._pending_narrow_pats_dirstate = None
 
-def savewcbackup(repo, backupname):
-    if requirements.NARROW_REQUIREMENT not in repo.requirements:
-        return
-    vfs = repo.vfs
-    vfs.tryunlink(backupname)
-    # It may not exist in old repos
-    if vfs.exists(DIRSTATE_FILENAME):
-        util.copyfile(
-            vfs.join(DIRSTATE_FILENAME), vfs.join(backupname), hardlink=True
-        )
+        tr.addpostclose(b'narrow-spec-dirstate', clean_pending)
+        tr.addabort(b'narrow-spec-dirstate', clean_pending)
+        repo._pending_narrow_pats_dirstate = repo.narrowpats
 
+        def write_spec(f):
+            f.write(spec)
 
-def restorewcbackup(repo, backupname):
-    if requirements.NARROW_REQUIREMENT not in repo.requirements:
-        return
-    # It may not exist in old repos
-    if repo.vfs.exists(backupname):
-        util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))
-
-
-def clearwcbackup(repo, backupname):
-    if requirements.NARROW_REQUIREMENT not in repo.requirements:
-        return
-    repo.vfs.tryunlink(backupname)
+        tr.addfilegenerator(
+            # XXX think about order at some point
+            b"narrow-spec-dirstate",
+            (DIRSTATE_FILENAME,),
+            write_spec,
+            location=b'plain',
+        )
 
 
 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
@@ -296,8 +321,11 @@
     # Avoid infinite recursion when updating the working copy
     if getattr(repo, '_updatingnarrowspec', False):
         return
-    storespec = repo.svfs.tryread(FILENAME)
-    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
+    storespec = repo.narrowpats
+    wcspec = repo._pending_narrow_pats_dirstate
+    if wcspec is None:
+        oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
+        wcspec = parseconfig(repo.ui, oldspec)
     if wcspec != storespec:
         raise error.StateError(
             _(b"working copy's narrowspec is stale"),
@@ -311,21 +339,30 @@
     When assumeclean=True, files that are not known to be clean will also
     be deleted. It is then up to the caller to make sure they are clean.
     """
-    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
-    newspec = repo.svfs.tryread(FILENAME)
+    old = repo._pending_narrow_pats_dirstate
+    if old is None:
+        oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
+        oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
+    else:
+        oldincludes, oldexcludes = old
+    newincludes, newexcludes = repo.narrowpats
     repo._updatingnarrowspec = True
 
-    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
-    newincludes, newexcludes = parseconfig(repo.ui, newspec)
     oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
     newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
     addedmatch = matchmod.differencematcher(newmatch, oldmatch)
     removedmatch = matchmod.differencematcher(oldmatch, newmatch)
 
+    assert repo.currentwlock() is not None
     ds = repo.dirstate
-    lookup, status, _mtime_boundary = ds.status(
-        removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
-    )
+    with ds.running_status(repo):
+        lookup, status, _mtime_boundary = ds.status(
+            removedmatch,
+            subrepos=[],
+            ignored=True,
+            clean=True,
+            unknown=True,
+        )
     trackeddirty = status.modified + status.added
     clean = status.clean
     if assumeclean:
--- a/mercurial/patch.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/patch.py	Thu Mar 02 22:45:44 2023 +0100
@@ -570,22 +570,23 @@
         self.changed.add(fname)
 
     def close(self):
-        wctx = self.repo[None]
-        changed = set(self.changed)
-        for src, dst in self.copied:
-            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
-        if self.removed:
-            wctx.forget(sorted(self.removed))
-            for f in self.removed:
-                if f not in self.repo.dirstate:
-                    # File was deleted and no longer belongs to the
-                    # dirstate, it was probably marked added then
-                    # deleted, and should not be considered by
-                    # marktouched().
-                    changed.discard(f)
-        if changed:
-            scmutil.marktouched(self.repo, changed, self.similarity)
-        return sorted(self.changed)
+        with self.repo.dirstate.changing_files(self.repo):
+            wctx = self.repo[None]
+            changed = set(self.changed)
+            for src, dst in self.copied:
+                scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
+            if self.removed:
+                wctx.forget(sorted(self.removed))
+                for f in self.removed:
+                    if f not in self.repo.dirstate:
+                        # File was deleted and no longer belongs to the
+                        # dirstate, it was probably marked added then
+                        # deleted, and should not be considered by
+                        # marktouched().
+                        changed.discard(f)
+            if changed:
+                scmutil.marktouched(self.repo, changed, self.similarity)
+            return sorted(self.changed)
 
 
 class filestore:
--- a/mercurial/pathutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/pathutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -4,6 +4,13 @@
 import posixpath
 import stat
 
+from typing import (
+    Any,
+    Callable,
+    Iterator,
+    Optional,
+)
+
 from .i18n import _
 from . import (
     encoding,
@@ -13,15 +20,6 @@
     util,
 )
 
-if pycompat.TYPE_CHECKING:
-    from typing import (
-        Any,
-        Callable,
-        Iterator,
-        Optional,
-    )
-
-
 rustdirs = policy.importrust('dirstate', 'Dirs')
 parsers = policy.importmod('parsers')
 
@@ -56,7 +54,7 @@
 
     def __init__(self, root, callback=None, realfs=True, cached=False):
         self.audited = set()
-        self.auditeddir = set()
+        self.auditeddir = dict()
         self.root = root
         self._realfs = realfs
         self._cached = cached
@@ -72,8 +70,7 @@
         path may contain a pattern (e.g. foodir/**.txt)"""
 
         path = util.localpath(path)
-        normpath = self.normcase(path)
-        if normpath in self.audited:
+        if path in self.audited:
             return
         # AIX ignores "/" at end of path, others raise EISDIR.
         if util.endswithsep(path):
@@ -90,13 +87,14 @@
                 _(b"path contains illegal component: %s") % path
             )
         # Windows shortname aliases
-        for p in parts:
-            if b"~" in p:
-                first, last = p.split(b"~", 1)
-                if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
-                    raise error.InputError(
-                        _(b"path contains illegal component: %s") % path
-                    )
+        if b"~" in path:
+            for p in parts:
+                if b"~" in p:
+                    first, last = p.split(b"~", 1)
+                    if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
+                        raise error.InputError(
+                            _(b"path contains illegal component: %s") % path
+                        )
         if b'.hg' in _lowerclean(path):
             lparts = [_lowerclean(p) for p in parts]
             for p in b'.hg', b'.hg.':
@@ -108,36 +106,43 @@
                         % (path, pycompat.bytestr(base))
                     )
 
-        normparts = util.splitpath(normpath)
-        assert len(parts) == len(normparts)
-
-        parts.pop()
-        normparts.pop()
-        # It's important that we check the path parts starting from the root.
-        # We don't want to add "foo/bar/baz" to auditeddir before checking if
-        # there's a "foo/.hg" directory. This also means we won't accidentally
-        # traverse a symlink into some other filesystem (which is potentially
-        # expensive to access).
-        for i in range(len(parts)):
-            prefix = pycompat.ossep.join(parts[: i + 1])
-            normprefix = pycompat.ossep.join(normparts[: i + 1])
-            if normprefix in self.auditeddir:
-                continue
-            if self._realfs:
-                self._checkfs(prefix, path)
-            if self._cached:
-                self.auditeddir.add(normprefix)
+        if self._realfs:
+            # It's important that we check the path parts starting from the root.
+            # We don't want to add "foo/bar/baz" to auditeddir before checking if
+            # there's a "foo/.hg" directory. This also means we won't accidentally
+            # traverse a symlink into some other filesystem (which is potentially
+            # expensive to access).
+            for prefix in finddirs_rev_noroot(path):
+                if prefix in self.auditeddir:
+                    res = self.auditeddir[prefix]
+                else:
+                    res = pathauditor._checkfs_exists(
+                        self.root, prefix, path, self.callback
+                    )
+                    if self._cached:
+                        self.auditeddir[prefix] = res
+                if not res:
+                    break
 
         if self._cached:
-            self.audited.add(normpath)
+            self.audited.add(path)
 
-    def _checkfs(self, prefix, path):
-        # type: (bytes, bytes) -> None
-        """raise exception if a file system backed check fails"""
-        curpath = os.path.join(self.root, prefix)
+    @staticmethod
+    def _checkfs_exists(
+        root,
+        prefix: bytes,
+        path: bytes,
+        callback: Optional[Callable[[bytes], bool]] = None,
+    ):
+        """raise exception if a file system backed check fails.
+
+        Return a bool that indicates that the directory (or file) exists."""
+        curpath = os.path.join(root, prefix)
         try:
             st = os.lstat(curpath)
         except OSError as err:
+            if err.errno == errno.ENOENT:
+                return False
             # EINVAL can be raised as invalid path syntax under win32.
             # These must be ignored so that patterns can still be checked.
             if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
@@ -152,9 +157,10 @@
             elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
                 os.path.join(curpath, b'.hg')
             ):
-                if not self.callback or not self.callback(curpath):
+                if not callback or not callback(curpath):
                     msg = _(b"path '%s' is inside nested repo %r")
                     raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
+        return True
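
# A toy model of the caching change above: each directory prefix is checked
# root-first at most once, and the boolean result ("does this prefix exist?")
# is remembered so later paths sharing a prefix skip the filesystem entirely.
# `check_prefix` stands in for pathauditor._checkfs_exists.
def _audit_with_cache(prefixes_root_first, dircache, check_prefix):
    for prefix in prefixes_root_first:
        res = dircache.get(prefix)
        if res is None:
            res = check_prefix(prefix)  # may raise (nested repo, etc.)
            dircache[prefix] = res
        if not res:
            break  # the prefix does not exist, nothing deeper can either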
 
     def check(self, path):
         # type: (bytes) -> bool
@@ -314,6 +320,13 @@
     yield b''
 
 
+def finddirs_rev_noroot(path: bytes) -> Iterator[bytes]:
+    pos = path.find(pycompat.ossep)
+    while pos != -1:
+        yield path[:pos]
+        pos = path.find(pycompat.ossep, pos + 1)
+
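# For clarity, the generator above yields the ancestors of a path root-first,
# excluding both the (empty) root and the path itself; with ossep == b'/':
#
#   list(finddirs_rev_noroot(b'a/b/c')) == [b'a', b'a/b']
#   list(finddirs_rev_noroot(b'top'))   == []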
+
 class dirs:
     '''a multiset of directory names from a set of file paths'''
 
--- a/mercurial/policy.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/policy.py	Thu Mar 02 22:45:44 2023 +0100
@@ -76,7 +76,7 @@
     ('cext', 'bdiff'): 3,
     ('cext', 'mpatch'): 1,
     ('cext', 'osutil'): 4,
-    ('cext', 'parsers'): 20,
+    ('cext', 'parsers'): 21,
 }
 
 # map import request to other package or module
--- a/mercurial/posix.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/posix.py	Thu Mar 02 22:45:44 2023 +0100
@@ -17,8 +17,23 @@
 import stat
 import sys
 import tempfile
+import typing
 import unicodedata
 
+from typing import (
+    Any,
+    AnyStr,
+    Iterable,
+    Iterator,
+    List,
+    Match,
+    NoReturn,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+)
+
 from .i18n import _
 from .pycompat import (
     getattr,
@@ -44,7 +59,7 @@
     # vaguely unix-like but don't have hardlink support. For those
     # poor souls, just say we tried and that it failed so we fall back
     # to copies.
-    def oslink(src, dst):
+    def oslink(src: bytes, dst: bytes) -> NoReturn:
         raise OSError(
             errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst)
         )
@@ -54,15 +69,47 @@
 unlink = os.unlink
 rename = os.rename
 removedirs = os.removedirs
-expandglobs = False
+
+if typing.TYPE_CHECKING:
+    # Replace the various overloads that come along with aliasing stdlib methods
+    # with the narrow definition that we care about in the type checking phase
+    # only.  This ensures that both Windows and POSIX see only the definition
+    # that is actually available.
+    #
+    # Note that if we check pycompat.TYPE_CHECKING here, it is always False, and
+    # the methods aren't replaced.
+
+    def normpath(path: bytes) -> bytes:
+        raise NotImplementedError
+
+    def abspath(path: AnyStr) -> AnyStr:
+        raise NotImplementedError
 
-umask = os.umask(0)
+    def oslink(src: bytes, dst: bytes) -> None:
+        raise NotImplementedError
+
+    def readlink(path: bytes) -> bytes:
+        raise NotImplementedError
+
+    def unlink(path: bytes) -> None:
+        raise NotImplementedError
+
+    def rename(src: bytes, dst: bytes) -> None:
+        raise NotImplementedError
+
+    def removedirs(name: bytes) -> None:
+        raise NotImplementedError
+
+
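# A generic illustration of the narrowing trick above (the names here are
# made up): at runtime the alias keeps the stdlib behavior, while the stub
# under typing.TYPE_CHECKING is the only definition the type checker sees,
# replacing the broad stdlib overloads with the bytes-only signature.
#
#   import os
#   import typing
#
#   my_unlink = os.unlink  # runtime binding
#
#   if typing.TYPE_CHECKING:
#
#       def my_unlink(path: bytes) -> None:
#           raise NotImplementedError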
+expandglobs: bool = False
+
+umask: int = os.umask(0)
 os.umask(umask)
 
 posixfile = open
 
 
-def split(p):
+def split(p: bytes) -> Tuple[bytes, bytes]:
     """Same as posixpath.split, but faster
 
     >>> import posixpath
@@ -85,17 +132,17 @@
     return ht[0] + b'/', ht[1]
 
 
-def openhardlinks():
+def openhardlinks() -> bool:
     '''return true if it is safe to hold open file handles to hardlinks'''
     return True
 
 
-def nlinks(name):
+def nlinks(name: bytes) -> int:
     '''return number of hardlinks for the given file'''
     return os.lstat(name).st_nlink
 
 
-def parsepatchoutput(output_line):
+def parsepatchoutput(output_line: bytes) -> bytes:
     """parses the output produced by patch and returns the filename"""
     pf = output_line[14:]
     if pycompat.sysplatform == b'OpenVMS':
@@ -107,7 +154,9 @@
     return pf
 
 
-def sshargs(sshcmd, host, user, port):
+def sshargs(
+    sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes]
+) -> bytes:
     '''Build argument list for ssh'''
     args = user and (b"%s@%s" % (user, host)) or host
     if b'-' in args[:1]:
@@ -120,12 +169,12 @@
     return args
 
 
-def isexec(f):
+def isexec(f: bytes) -> bool:
     """check whether a file is executable"""
     return os.lstat(f).st_mode & 0o100 != 0
 
 
-def setflags(f, l, x):
+def setflags(f: bytes, l: bool, x: bool) -> None:
     st = os.lstat(f)
     s = st.st_mode
     if l:
@@ -169,7 +218,12 @@
         os.chmod(f, s & 0o666)
 
 
-def copymode(src, dst, mode=None, enforcewritable=False):
+def copymode(
+    src: bytes,
+    dst: bytes,
+    mode: Optional[bytes] = None,
+    enforcewritable: bool = False,
+) -> None:
     """Copy the file mode from the file at path src to dst.
     If src doesn't exist, we're using mode instead. If mode is None, we're
     using umask."""
@@ -189,7 +243,7 @@
     os.chmod(dst, new_mode)
 
 
-def checkexec(path):
+def checkexec(path: bytes) -> bool:
     """
     Check whether the given path is on a filesystem with UNIX-like exec flags
 
@@ -230,7 +284,7 @@
             else:
                 # checkisexec exists, check if it actually is exec
                 if m & EXECFLAGS != 0:
-                    # ensure checkisexec exists, check it isn't exec
+                    # ensure checknoexec exists, check it isn't exec
                     try:
                         m = os.stat(checknoexec).st_mode
                     except FileNotFoundError:
@@ -269,7 +323,7 @@
         return False
 
 
-def checklink(path):
+def checklink(path: bytes) -> bool:
     """check whether the given path is on a symlink-capable filesystem"""
     # mktemp is not racy because symlink creation will fail if the
     # file already exists
@@ -334,13 +388,13 @@
             return False
 
 
-def checkosfilename(path):
+def checkosfilename(path: bytes) -> Optional[bytes]:
     """Check that the base-relative path is a valid filename on this platform.
     Returns None if the path is ok, or a UI string describing the problem."""
     return None  # on posix platforms, every path is ok
 
 
-def getfsmountpoint(dirpath):
+def getfsmountpoint(dirpath: bytes) -> Optional[bytes]:
     """Get the filesystem mount point from a directory (best-effort)
 
     Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -348,7 +402,7 @@
     return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
 
 
-def getfstype(dirpath):
+def getfstype(dirpath: bytes) -> Optional[bytes]:
     """Get the filesystem type name from a directory (best-effort)
 
     Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -356,29 +410,29 @@
     return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
 
 
-def get_password():
+def get_password() -> bytes:
     return encoding.strtolocal(getpass.getpass(''))
 
 
-def setbinary(fd):
+def setbinary(fd) -> None:
     pass
 
 
-def pconvert(path):
+def pconvert(path: bytes) -> bytes:
     return path
 
 
-def localpath(path):
+def localpath(path: bytes) -> bytes:
     return path
 
 
-def samefile(fpath1, fpath2):
+def samefile(fpath1: bytes, fpath2: bytes) -> bool:
     """Returns whether path1 and path2 refer to the same file. This is only
     guaranteed to work for files, not directories."""
     return os.path.samefile(fpath1, fpath2)
 
 
-def samedevice(fpath1, fpath2):
+def samedevice(fpath1: bytes, fpath2: bytes) -> bool:
     """Returns whether fpath1 and fpath2 are on the same device. This is only
     guaranteed to work for files, not directories."""
     st1 = os.lstat(fpath1)
@@ -387,18 +441,18 @@
 
 
 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
-def normcase(path):
+def normcase(path: bytes) -> bytes:
     return path.lower()
 
 
 # what normcase does to ASCII strings
-normcasespec = encoding.normcasespecs.lower
+normcasespec: int = encoding.normcasespecs.lower
 # fallback normcase function for non-ASCII strings
 normcasefallback = normcase
 
 if pycompat.isdarwin:
 
-    def normcase(path):
+    def normcase(path: bytes) -> bytes:
         """
         Normalize a filename for OS X-compatible comparison:
         - escape-encode invalid characters
@@ -423,7 +477,7 @@
 
     normcasespec = encoding.normcasespecs.lower
 
-    def normcasefallback(path):
+    def normcasefallback(path: bytes) -> bytes:
         try:
             u = path.decode('utf-8')
         except UnicodeDecodeError:
@@ -464,7 +518,7 @@
     )
 
     # use upper-ing as normcase as same as NTFS workaround
-    def normcase(path):
+    def normcase(path: bytes) -> bytes:
         pathlen = len(path)
         if (pathlen == 0) or (path[0] != pycompat.ossep):
             # treat as relative
@@ -490,20 +544,20 @@
     # but these translations are not supported by native
     # tools, so the exec bit tends to be set erroneously.
     # Therefore, disable executable bit access on Cygwin.
-    def checkexec(path):
+    def checkexec(path: bytes) -> bool:
         return False
 
     # Similarly, Cygwin's symlink emulation is likely to create
     # problems when Mercurial is used from both Cygwin and native
     # Windows, with other native tools, or on shared volumes
-    def checklink(path):
+    def checklink(path: bytes) -> bool:
         return False
 
 
-_needsshellquote = None
+_needsshellquote: Optional[Match[bytes]] = None
 
 
-def shellquote(s):
+def shellquote(s: bytes) -> bytes:
     if pycompat.sysplatform == b'OpenVMS':
         return b'"%s"' % s
     global _needsshellquote
@@ -516,12 +570,12 @@
         return b"'%s'" % s.replace(b"'", b"'\\''")
 
 
-def shellsplit(s):
+def shellsplit(s: bytes) -> List[bytes]:
     """Parse a command string in POSIX shell way (best-effort)"""
     return pycompat.shlexsplit(s, posix=True)
 
 
-def testpid(pid):
+def testpid(pid: int) -> bool:
     '''return False if pid dead, True if running or not sure'''
     if pycompat.sysplatform == b'OpenVMS':
         return True
@@ -532,12 +586,12 @@
         return inst.errno != errno.ESRCH
 
 
-def isowner(st):
+def isowner(st: os.stat_result) -> bool:
     """Return True if the stat object st is from the current user."""
     return st.st_uid == os.getuid()
 
 
-def findexe(command):
+def findexe(command: bytes) -> Optional[bytes]:
     """Find executable for command searching like which does.
     If command is a basename then PATH is searched for command.
     PATH isn't searched if command is an absolute or relative path.
@@ -545,7 +599,7 @@
     if pycompat.sysplatform == b'OpenVMS':
         return command
 
-    def findexisting(executable):
+    def findexisting(executable: bytes) -> Optional[bytes]:
         b'Will return executable if existing file'
         if os.path.isfile(executable) and os.access(executable, os.X_OK):
             return executable
@@ -564,14 +618,14 @@
     return None
 
 
-def setsignalhandler():
+def setsignalhandler() -> None:
     pass
 
 
 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
 
 
-def statfiles(files):
+def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]:
     """Stat each file in files. Yield each stat, or None if a file does not
     exist or has a type we don't care about."""
     lstat = os.lstat
@@ -586,12 +640,12 @@
         yield st
 
 
-def getuser():
+def getuser() -> bytes:
     '''return name of current user'''
     return pycompat.fsencode(getpass.getuser())
 
 
-def username(uid=None):
+def username(uid: Optional[int] = None) -> Optional[bytes]:
     """Return the name of the user with the given uid.
 
     If uid is None, return the name of the current user."""
@@ -604,7 +658,7 @@
         return b'%d' % uid
 
 
-def groupname(gid=None):
+def groupname(gid: Optional[int] = None) -> Optional[bytes]:
     """Return the name of the group with the given gid.
 
     If gid is None, return the name of the current group."""
@@ -617,7 +671,7 @@
         return pycompat.bytestr(gid)
 
 
-def groupmembers(name):
+def groupmembers(name: bytes) -> List[bytes]:
     """Return the list of members of the group with the given
     name, KeyError if the group does not exist.
     """
@@ -625,23 +679,27 @@
     return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem))
 
 
-def spawndetached(args):
+def spawndetached(args: List[bytes]) -> int:
     return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args)
 
 
-def gethgcmd():
+def gethgcmd():  # TODO: convert to bytes, like on Windows?
     return sys.argv[:1]
 
 
-def makedir(path, notindexed):
+def makedir(path: bytes, notindexed: bool) -> None:
     os.mkdir(path)
 
 
-def lookupreg(key, name=None, scope=None):
+def lookupreg(
+    key: bytes,
+    name: Optional[bytes] = None,
+    scope: Optional[Union[int, Iterable[int]]] = None,
+) -> Optional[bytes]:
     return None
 
 
-def hidewindow():
+def hidewindow() -> None:
     """Hide current shell window.
 
     Used to hide the window opened when starting asynchronous
@@ -651,15 +709,15 @@
 
 
 class cachestat:
-    def __init__(self, path):
+    def __init__(self, path: bytes) -> None:
         self.stat = os.stat(path)
 
-    def cacheable(self):
+    def cacheable(self) -> bool:
         return bool(self.stat.st_ino)
 
     __hash__ = object.__hash__
 
-    def __eq__(self, other):
+    def __eq__(self, other: Any) -> bool:
         try:
             # Only dev, ino, size, mtime and atime are likely to change. Out
             # of these, we shouldn't compare atime but should compare the
@@ -680,18 +738,18 @@
         except AttributeError:
             return False
 
-    def __ne__(self, other):
+    def __ne__(self, other: Any) -> bool:
         return not self == other
 
 
-def statislink(st):
+def statislink(st: Optional[os.stat_result]) -> bool:
     '''check whether a stat result is a symlink'''
-    return st and stat.S_ISLNK(st.st_mode)
+    return stat.S_ISLNK(st.st_mode) if st else False
 
 
-def statisexec(st):
+def statisexec(st: Optional[os.stat_result]) -> bool:
     '''check whether a stat result is an executable file'''
-    return st and (st.st_mode & 0o100 != 0)
+    return (st.st_mode & 0o100 != 0) if st else False
 
 
 def poll(fds):
@@ -708,7 +766,7 @@
     return sorted(list(set(sum(res, []))))
 
 
-def readpipe(pipe):
+def readpipe(pipe) -> bytes:
     """Read all available data from a pipe."""
     # We can't fstat() a pipe because Linux will always report 0.
     # So, we set the pipe to non-blocking mode and read everything
@@ -733,7 +791,7 @@
         fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
 
 
-def bindunixsocket(sock, path):
+def bindunixsocket(sock, path: bytes) -> None:
     """Bind the UNIX domain socket to the specified path"""
     # use relative path instead of full path at bind() if possible, since
     # AF_UNIX path has very small length limit (107 chars) on common
--- a/mercurial/pure/bdiff.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/pure/bdiff.py	Thu Mar 02 22:45:44 2023 +0100
@@ -10,8 +10,13 @@
 import re
 import struct
 
+from typing import (
+    List,
+    Tuple,
+)
 
-def splitnewlines(text):
+
+def splitnewlines(text: bytes) -> List[bytes]:
     '''like str.splitlines, but only split on newlines.'''
     lines = [l + b'\n' for l in text.split(b'\n')]
     if lines:
@@ -22,7 +27,9 @@
     return lines
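
# Concrete behavior of splitnewlines, for reference:
#
#   splitnewlines(b'a\nb\n') == [b'a\n', b'b\n']
#   splitnewlines(b'a\nb')   == [b'a\n', b'b']   # last line keeps no newline
#   splitnewlines(b'')       == []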
 
 
-def _normalizeblocks(a, b, blocks):
+def _normalizeblocks(
+    a: List[bytes], b: List[bytes], blocks
+) -> List[Tuple[int, int, int]]:
     prev = None
     r = []
     for curr in blocks:
@@ -57,7 +64,7 @@
     return r
 
 
-def bdiff(a, b):
+def bdiff(a: bytes, b: bytes) -> bytes:
     a = bytes(a).splitlines(True)
     b = bytes(b).splitlines(True)
 
@@ -84,7 +91,7 @@
     return b"".join(bin)
 
 
-def blocks(a, b):
+def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]:
     an = splitnewlines(a)
     bn = splitnewlines(b)
     d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
@@ -92,7 +99,7 @@
     return [(i, i + n, j, j + n) for (i, j, n) in d]
 
 
-def fixws(text, allws):
+def fixws(text: bytes, allws: bool) -> bytes:
     if allws:
         text = re.sub(b'[ \t\r]+', b'', text)
     else:
--- a/mercurial/pure/mpatch.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/pure/mpatch.py	Thu Mar 02 22:45:44 2023 +0100
@@ -9,6 +9,11 @@
 import io
 import struct
 
+from typing import (
+    List,
+    Tuple,
+)
+
 
 stringio = io.BytesIO
 
@@ -28,7 +33,9 @@
 # temporary string buffers.
 
 
-def _pull(dst, src, l):  # pull l bytes from src
+def _pull(
+    dst: List[Tuple[int, int]], src: List[Tuple[int, int]], l: int
+) -> None:  # pull l bytes from src
     while l:
         f = src.pop()
         if f[0] > l:  # do we need to split?
@@ -39,7 +46,7 @@
         l -= f[0]
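
# _pull operates on fragment lists of (length, offset) pairs kept in reverse
# order: the fragment at the *end* of the list is consumed first. A worked
# example of the splitting case:
#
#   src = [(3, 10), (2, 0)]   # i.e. 2 bytes at offset 0, then 3 bytes at 10
#   dst = []
#   _pull(dst, src, 3)
#   assert dst == [(2, 0), (1, 10)]  # (3, 10) was split to satisfy l == 3
#   assert src == [(2, 11)]          # the 2-byte remainder stays behind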
 
 
-def _move(m, dest, src, count):
+def _move(m: stringio, dest: int, src: int, count: int) -> None:
     """move count bytes from src to dest
 
     The file pointer is left at the end of dest.
@@ -50,7 +57,9 @@
     m.write(buf)
 
 
-def _collect(m, buf, list):
+def _collect(
+    m: stringio, buf: int, list: List[Tuple[int, int]]
+) -> Tuple[int, int]:
     start = buf
     for l, p in reversed(list):
         _move(m, buf, p, l)
@@ -58,7 +67,7 @@
     return (buf - start, start)
 
 
-def patches(a, bins):
+def patches(a: bytes, bins: List[bytes]) -> bytes:
     if not bins:
         return a
 
@@ -111,7 +120,7 @@
     return m.read(t[0])
 
 
-def patchedsize(orig, delta):
+def patchedsize(orig: int, delta: bytes) -> int:
     outlen, last, bin = 0, 0, 0
     binend = len(delta)
     data = 12
--- a/mercurial/pure/parsers.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/pure/parsers.py	Thu Mar 02 22:45:44 2023 +0100
@@ -435,6 +435,11 @@
         return self._wc_tracked and not (self._p1_tracked or self._p2_info)
 
     @property
+    def modified(self):
+        """True if the file has been modified"""
+        return self._wc_tracked and self._p1_tracked and self._p2_info
+
+    @property
     def maybe_clean(self):
         """True if the file has a chance to be in the "clean" state"""
         if not self._wc_tracked:
--- a/mercurial/pycompat.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/pycompat.py	Thu Mar 02 22:45:44 2023 +0100
@@ -28,6 +28,25 @@
 import tempfile
 import xmlrpc.client as xmlrpclib
 
+from typing import (
+    Any,
+    AnyStr,
+    BinaryIO,
+    Callable,
+    Dict,
+    Iterable,
+    Iterator,
+    List,
+    Mapping,
+    NoReturn,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    TypeVar,
+    cast,
+    overload,
+)
 
 ispy3 = sys.version_info[0] >= 3
 ispypy = '__pypy__' in sys.builtin_module_names
@@ -38,6 +57,12 @@
 
     TYPE_CHECKING = typing.TYPE_CHECKING
 
+_GetOptResult = Tuple[List[Tuple[bytes, bytes]], List[bytes]]
+_T0 = TypeVar('_T0')
+_T1 = TypeVar('_T1')
+_S = TypeVar('_S')
+_Tbytestr = TypeVar('_Tbytestr', bound='bytestr')
+
 
 def future_set_exception_info(f, exc_info):
     f.set_exception(exc_info[0])
@@ -46,7 +71,7 @@
 FileNotFoundError = builtins.FileNotFoundError
 
 
-def identity(a):
+def identity(a: _T0) -> _T0:
     return a
 
 
@@ -94,28 +119,37 @@
 
 fsencode = os.fsencode
 fsdecode = os.fsdecode
-oscurdir = os.curdir.encode('ascii')
-oslinesep = os.linesep.encode('ascii')
-osname = os.name.encode('ascii')
-ospathsep = os.pathsep.encode('ascii')
-ospardir = os.pardir.encode('ascii')
-ossep = os.sep.encode('ascii')
-osaltsep = os.altsep
-if osaltsep:
-    osaltsep = osaltsep.encode('ascii')
-osdevnull = os.devnull.encode('ascii')
+oscurdir: bytes = os.curdir.encode('ascii')
+oslinesep: bytes = os.linesep.encode('ascii')
+osname: bytes = os.name.encode('ascii')
+ospathsep: bytes = os.pathsep.encode('ascii')
+ospardir: bytes = os.pardir.encode('ascii')
+ossep: bytes = os.sep.encode('ascii')
+osaltsep: Optional[bytes] = os.altsep.encode('ascii') if os.altsep else None
+osdevnull: bytes = os.devnull.encode('ascii')
 
-sysplatform = sys.platform.encode('ascii')
-sysexecutable = sys.executable
-if sysexecutable:
-    sysexecutable = os.fsencode(sysexecutable)
+sysplatform: bytes = sys.platform.encode('ascii')
+sysexecutable: bytes = os.fsencode(sys.executable) if sys.executable else b''
 
 
-def maplist(*args):
-    return list(map(*args))
+if TYPE_CHECKING:
+
+    @overload
+    def maplist(f: Callable[[_T0], _S], arg: Iterable[_T0]) -> List[_S]:
+        ...
+
+    @overload
+    def maplist(
+        f: Callable[[_T0, _T1], _S], arg1: Iterable[_T0], arg2: Iterable[_T1]
+    ) -> List[_S]:
+        ...
 
 
-def rangelist(*args):
+def maplist(f, *args):
+    return list(map(f, *args))
+
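# The overloads above only narrow types for the checker; at runtime:
#
#   maplist(len, [b'a', b'bb']) == [1, 2]
#   maplist(lambda x, y: x + y, [1, 2], [10, 20]) == [11, 22]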
+
+def rangelist(*args) -> List[int]:
     return list(range(*args))
 
 
@@ -128,7 +162,7 @@
 
 long = int
 
-if getattr(sys, 'argv', None) is not None:
+if builtins.getattr(sys, 'argv', None) is not None:
     # On POSIX, the char** argv array is converted to Python str using
     # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which
     # isn't directly callable from Python code. In practice, os.fsencode()
@@ -143,6 +177,7 @@
     # (this is how Python 2 worked). To get that, we encode with the mbcs
     # encoding, which will pass CP_ACP to the underlying Windows API to
     # produce bytes.
+    sysargv: List[bytes] = []
     if os.name == r'nt':
         sysargv = [a.encode("mbcs", "ignore") for a in sys.argv]
     else:
@@ -211,38 +246,53 @@
     # https://github.com/google/pytype/issues/500
     if TYPE_CHECKING:
 
-        def __init__(self, s=b''):
+        def __init__(self, s: object = b'') -> None:
             pass
 
-    def __new__(cls, s=b''):
+    def __new__(cls: Type[_Tbytestr], s: object = b'') -> _Tbytestr:
         if isinstance(s, bytestr):
             return s
         if not isinstance(
             s, (bytes, bytearray)
-        ) and not hasattr(  # hasattr-py3-only
+        ) and not builtins.hasattr(  # hasattr-py3-only
             s, u'__bytes__'
         ):
             s = str(s).encode('ascii')
         return bytes.__new__(cls, s)
 
-    def __getitem__(self, key):
+    # The base class uses `int` return in py3, but the point of this class is to
+    # behave like py2.
+    def __getitem__(self, key) -> bytes:  # pytype: disable=signature-mismatch
         s = bytes.__getitem__(self, key)
         if not isinstance(s, bytes):
             s = bytechr(s)
         return s
 
-    def __iter__(self):
+    # The base class expects `Iterator[int]` return in py3, but the point of
+    # this class is to behave like py2.
+    def __iter__(self) -> Iterator[bytes]:  # pytype: disable=signature-mismatch
         return iterbytestr(bytes.__iter__(self))
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return bytes.__repr__(self)[1:]  # drop b''
 
 
-def iterbytestr(s):
+def iterbytestr(s: Iterable[int]) -> Iterator[bytes]:
     """Iterate bytes as if it were a str object of Python 2"""
     return map(bytechr, s)
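
# What "as if it were a str object of Python 2" means in practice:
#
#   list(iterbytestr(b'ab')) == [b'a', b'b']  # one-byte bytes objects
#   list(iter(b'ab'))        == [97, 98]      # plain py3 iteration gives ints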
 
 
+if TYPE_CHECKING:
+
+    @overload
+    def maybebytestr(s: bytes) -> bytestr:
+        ...
+
+    @overload
+    def maybebytestr(s: _T0) -> _T0:
+        ...
+
+
 def maybebytestr(s):
     """Promote bytes to bytestr"""
     if isinstance(s, bytes):
@@ -250,7 +300,7 @@
     return s
 
 
-def sysbytes(s):
+def sysbytes(s: AnyStr) -> bytes:
     """Convert an internal str (e.g. keyword, __doc__) back to bytes
 
     This never raises UnicodeEncodeError, but only ASCII characters
@@ -261,7 +311,7 @@
     return s.encode('utf-8')
 
 
-def sysstr(s):
+def sysstr(s: AnyStr) -> str:
     """Return a keyword str to be passed to Python functions such as
     getattr() and str.encode()
 
@@ -274,29 +324,29 @@
     return s.decode('latin-1')
 
 
-def strurl(url):
+def strurl(url: AnyStr) -> str:
     """Converts a bytes url back to str"""
     if isinstance(url, bytes):
         return url.decode('ascii')
     return url
 
 
-def bytesurl(url):
+def bytesurl(url: AnyStr) -> bytes:
     """Converts a str url to bytes by encoding in ascii"""
     if isinstance(url, str):
         return url.encode('ascii')
     return url
 
 
-def raisewithtb(exc, tb):
+def raisewithtb(exc: BaseException, tb) -> NoReturn:
     """Raise exception with the given traceback"""
     raise exc.with_traceback(tb)
 
 
-def getdoc(obj):
+def getdoc(obj: object) -> Optional[bytes]:
     """Get docstring as bytes; may be None so gettext() won't confuse it
     with _('')"""
-    doc = getattr(obj, '__doc__', None)
+    doc = builtins.getattr(obj, '__doc__', None)
     if doc is None:
         return doc
     return sysbytes(doc)
@@ -319,14 +369,22 @@
 unicode = str
 
 
-def open(name, mode=b'r', buffering=-1, encoding=None):
+def open(
+    name,
+    mode: AnyStr = b'r',
+    buffering: int = -1,
+    encoding: Optional[str] = None,
+) -> Any:
+    # TODO: assert binary mode, and cast result to BinaryIO?
     return builtins.open(name, sysstr(mode), buffering, encoding)
 
 
 safehasattr = _wrapattrfunc(builtins.hasattr)
 
 
-def _getoptbwrapper(orig, args, shortlist, namelist):
+def _getoptbwrapper(
+    orig, args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes]
+) -> _GetOptResult:
     """
     Takes bytes arguments, converts them to unicode, pass them to
     getopt.getopt(), convert the returned values back to bytes and then
@@ -342,7 +400,7 @@
     return opts, args
 
 
-def strkwargs(dic):
+def strkwargs(dic: Mapping[bytes, _T0]) -> Dict[str, _T0]:
     """
     Converts the keys of a python dictionary to str i.e. unicodes so that
     they can be passed as keyword arguments as dictionaries with bytes keys
@@ -352,7 +410,7 @@
     return dic
 
 
-def byteskwargs(dic):
+def byteskwargs(dic: Mapping[str, _T0]) -> Dict[bytes, _T0]:
     """
     Converts keys of python dictionaries to bytes as they were converted to
     str to pass that dictionary as a keyword argument on Python 3.
@@ -362,7 +420,9 @@
 
 
 # TODO: handle shlex.shlex().
-def shlexsplit(s, comments=False, posix=True):
+def shlexsplit(
+    s: bytes, comments: bool = False, posix: bool = True
+) -> List[bytes]:
     """
     Takes bytes argument, convert it to str i.e. unicodes, pass that into
     shlex.split(), convert the returned value to bytes and return that for
@@ -377,46 +437,59 @@
 
 json_loads = json.loads
 
-isjython = sysplatform.startswith(b'java')
+isjython: bool = sysplatform.startswith(b'java')
 
-isdarwin = sysplatform.startswith(b'darwin')
-islinux = sysplatform.startswith(b'linux')
-isposix = osname == b'posix'
-iswindows = osname == b'nt'
+isdarwin: bool = sysplatform.startswith(b'darwin')
+islinux: bool = sysplatform.startswith(b'linux')
+isposix: bool = osname == b'posix'
+iswindows: bool = osname == b'nt'
 
 
-def getoptb(args, shortlist, namelist):
+def getoptb(
+    args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes]
+) -> _GetOptResult:
     return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
 
 
-def gnugetoptb(args, shortlist, namelist):
+def gnugetoptb(
+    args: Sequence[bytes], shortlist: bytes, namelist: Sequence[bytes]
+) -> _GetOptResult:
     return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
 
 
-def mkdtemp(suffix=b'', prefix=b'tmp', dir=None):
+def mkdtemp(
+    suffix: bytes = b'', prefix: bytes = b'tmp', dir: Optional[bytes] = None
+) -> bytes:
     return tempfile.mkdtemp(suffix, prefix, dir)
 
 
 # text=True is not supported; use util.from/tonativeeol() instead
-def mkstemp(suffix=b'', prefix=b'tmp', dir=None):
+def mkstemp(
+    suffix: bytes = b'', prefix: bytes = b'tmp', dir: Optional[bytes] = None
+) -> Tuple[int, bytes]:
     return tempfile.mkstemp(suffix, prefix, dir)
 
 
 # TemporaryFile does not support an "encoding=" argument on python2.
 # These wrapper files are always opened in byte mode.
-def unnamedtempfile(mode=None, *args, **kwargs):
+def unnamedtempfile(mode: Optional[bytes] = None, *args, **kwargs) -> BinaryIO:
     if mode is None:
         mode = 'w+b'
     else:
         mode = sysstr(mode)
     assert 'b' in mode
-    return tempfile.TemporaryFile(mode, *args, **kwargs)
+    return cast(BinaryIO, tempfile.TemporaryFile(mode, *args, **kwargs))
 
 
 # NamedTemporaryFile does not support an "encoding=" argument on python2.
 # These wrapper files are always opened in byte mode.
 def namedtempfile(
-    mode=b'w+b', bufsize=-1, suffix=b'', prefix=b'tmp', dir=None, delete=True
+    mode: bytes = b'w+b',
+    bufsize: int = -1,
+    suffix: bytes = b'',
+    prefix: bytes = b'tmp',
+    dir: Optional[bytes] = None,
+    delete: bool = True,
 ):
     mode = sysstr(mode)
     assert 'b' in mode
--- a/mercurial/revlog.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/revlog.py	Thu Mar 02 22:45:44 2023 +0100
@@ -38,12 +38,15 @@
     COMP_MODE_DEFAULT,
     COMP_MODE_INLINE,
     COMP_MODE_PLAIN,
+    DELTA_BASE_REUSE_NO,
+    DELTA_BASE_REUSE_TRY,
     ENTRY_RANK,
     FEATURES_BY_VERSION,
     FLAG_GENERALDELTA,
     FLAG_INLINE_DATA,
     INDEX_HEADER,
     KIND_CHANGELOG,
+    KIND_FILELOG,
     RANK_UNKNOWN,
     REVLOGV0,
     REVLOGV1,
@@ -125,7 +128,7 @@
 # Aliased for performance.
 _zlibdecompress = zlib.decompress
 
-# max size of revlog with inline data
+# max size of inline data embedded into a revlog
 _maxinline = 131072
 
 # Flag processors for REVIDX_ELLIPSIS.
@@ -347,6 +350,7 @@
         self._chunkcachesize = 65536
         self._maxchainlen = None
         self._deltabothparents = True
+        self._candidate_group_chunk_size = 0
         self._debug_delta = False
         self.index = None
         self._docket = None
@@ -363,6 +367,11 @@
         self._srdensitythreshold = 0.50
         self._srmingapsize = 262144
 
+        # other optional features
+
+        # might remove rank configuration once the computation has no impact
+        self._compute_rank = False
+
         # Make copy of flag processors so each revlog instance can support
         # custom flags.
         self._flagprocessors = dict(flagutil.flagprocessors)
@@ -404,6 +413,7 @@
 
         if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
             new_header = CHANGELOGV2
+            self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
         elif b'revlogv2' in opts:
             new_header = REVLOGV2
         elif b'revlogv1' in opts:
@@ -421,6 +431,9 @@
             self._maxchainlen = opts[b'maxchainlen']
         if b'deltabothparents' in opts:
             self._deltabothparents = opts[b'deltabothparents']
+        dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
+        if dps_cgds:
+            self._candidate_group_chunk_size = dps_cgds
         self._lazydelta = bool(opts.get(b'lazydelta', True))
         self._lazydeltabase = False
         if self._lazydelta:
@@ -505,7 +518,6 @@
             self._docket = docket
             self._docket_file = entry_point
         else:
-            entry_data = b''
             self._initempty = True
             entry_data = self._get_data(entry_point, mmapindexthreshold)
             if len(entry_data) > 0:
@@ -653,9 +665,12 @@
     @util.propertycache
     def display_id(self):
         """The public facing "ID" of the revlog that we use in message"""
-        # Maybe we should build a user facing representation of
-        # revlog.target instead of using `self.radix`
-        return self.radix
+        if self.revlog_kind == KIND_FILELOG:
+            # Reference the file without the "data/" prefix, so it is familiar
+            # to the user.
+            return self.target[1]
+        else:
+            return self.radix
 
     def _get_decompressor(self, t):
         try:
@@ -2445,6 +2460,16 @@
                 self, write_debug=write_debug
             )
 
+        if cachedelta is not None and len(cachedelta) == 2:
+            # If the cached delta has no information about how it should be
+            # reused, add the default reuse instruction according to the
+            # revlog's configuration.
+            if self._generaldelta and self._lazydeltabase:
+                delta_base_reuse = DELTA_BASE_REUSE_TRY
+            else:
+                delta_base_reuse = DELTA_BASE_REUSE_NO
+            cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)
+
         revinfo = revlogutils.revisioninfo(
             node,
             p1,
@@ -2492,7 +2517,7 @@
             sidedata_offset = 0
 
         rank = RANK_UNKNOWN
-        if self._format_version == CHANGELOGV2:
+        if self._compute_rank:
             if (p1r, p2r) == (nullrev, nullrev):
                 rank = 1
             elif p1r != nullrev and p2r == nullrev:
@@ -2637,6 +2662,8 @@
         alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
+        debug_info=None,
+        delta_base_reuse_policy=None,
     ):
         """
         add a delta group
@@ -2652,6 +2679,14 @@
         if self._adding_group:
             raise error.ProgrammingError(b'cannot nest addgroup() calls')
 
+        # read the default delta-base reuse policy from revlog config if the
+        # group did not specify one.
+        if delta_base_reuse_policy is None:
+            if self._generaldelta and self._lazydeltabase:
+                delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
+            else:
+                delta_base_reuse_policy = DELTA_BASE_REUSE_NO
+
         self._adding_group = True
         empty = True
         try:
@@ -2662,6 +2697,7 @@
                 deltacomputer = deltautil.deltacomputer(
                     self,
                     write_debug=write_debug,
+                    debug_info=debug_info,
                 )
                 # loop through our set of deltas
                 for data in deltas:
@@ -2731,7 +2767,7 @@
                         p1,
                         p2,
                         flags,
-                        (baserev, delta),
+                        (baserev, delta, delta_base_reuse_policy),
                         alwayscache=alwayscache,
                         deltacomputer=deltacomputer,
                         sidedata=sidedata,
@@ -2886,6 +2922,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         if nodesorder not in (b'nodes', b'storage', b'linear', None):
             raise error.ProgrammingError(
@@ -2915,6 +2952,7 @@
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
     DELTAREUSEALWAYS = b'always'
--- a/mercurial/revlogutils/__init__.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/revlogutils/__init__.py	Thu Mar 02 22:45:44 2023 +0100
@@ -67,7 +67,7 @@
     node:       expected hash of the revision
     p1, p2:     parent revs of the revision
     btext:      built text cache consisting of a one-element list
-    cachedelta: (baserev, uncompressed_delta) or None
+    cachedelta: (baserev, uncompressed_delta, usage_mode) or None
     flags:      flags associated to the revision storage
 
     One of btext[0] or cachedelta must be set.
--- a/mercurial/revlogutils/constants.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/revlogutils/constants.py	Thu Mar 02 22:45:44 2023 +0100
@@ -301,3 +301,18 @@
 
 
 SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
+
+### What should be done with a cached delta and its base?
+
+# Ignore the cache when considering candidates.
+#
+# The cached delta might be used, but the delta base will not be scheduled for
+# usage earlier than in "normal" order.
+DELTA_BASE_REUSE_NO = 0
+
+# Prioritize trying the cached delta base
+#
+# The delta base will be tested for validity first, so that the cached deltas
+# get used whenever possible.
+DELTA_BASE_REUSE_TRY = 1
+DELTA_BASE_REUSE_FORCE = 2
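
# A minimal sketch of how a caller might select one of these policies,
# mirroring the default chosen by revlog.addgroup() in this changeset
# (general-delta revlogs that lazily reuse delta bases try the cached base
# first, everything else ignores it). DELTA_BASE_REUSE_FORCE is read here as
# "the cached base is mandatory", an interpretation, not upstream wording:
#
#   if generaldelta and lazydeltabase:
#       policy = DELTA_BASE_REUSE_TRY
#   else:
#       policy = DELTA_BASE_REUSE_NO
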
--- a/mercurial/revlogutils/debug.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/revlogutils/debug.py	Thu Mar 02 22:45:44 2023 +0100
@@ -6,12 +6,19 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
+import collections
+import string
+
 from .. import (
+    mdiff,
     node as nodemod,
+    revlogutils,
+    util,
 )
 
 from . import (
     constants,
+    deltas as deltautil,
 )
 
 INDEX_ENTRY_DEBUG_COLUMN = []
@@ -216,3 +223,499 @@
         fm.plain(b'\n')
 
     fm.end()
+
+
+def dump(ui, revlog):
+    """perform the work for `hg debugrevlog --dump"""
+    # XXX seems redundant with debug index ?
+    r = revlog
+    numrevs = len(r)
+    ui.write(
+        (
+            b"# rev p1rev p2rev start   end deltastart base   p1   p2"
+            b" rawsize totalsize compression heads chainlen\n"
+        )
+    )
+    ts = 0
+    heads = set()
+
+    for rev in range(numrevs):
+        dbase = r.deltaparent(rev)
+        if dbase == -1:
+            dbase = rev
+        cbase = r.chainbase(rev)
+        clen = r.chainlen(rev)
+        p1, p2 = r.parentrevs(rev)
+        rs = r.rawsize(rev)
+        ts = ts + rs
+        heads -= set(r.parentrevs(rev))
+        heads.add(rev)
+        try:
+            compression = ts / r.end(rev)
+        except ZeroDivisionError:
+            compression = 0
+        ui.write(
+            b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
+            b"%11d %5d %8d\n"
+            % (
+                rev,
+                p1,
+                p2,
+                r.start(rev),
+                r.end(rev),
+                r.start(dbase),
+                r.start(cbase),
+                r.start(p1),
+                r.start(p2),
+                rs,
+                ts,
+                compression,
+                len(heads),
+                clen,
+            )
+        )
+
+
+def debug_revlog(ui, revlog):
+    """code for `hg debugrevlog`"""
+    r = revlog
+    format = r._format_version
+    v = r._format_flags
+    flags = []
+    gdelta = False
+    if v & constants.FLAG_INLINE_DATA:
+        flags.append(b'inline')
+    if v & constants.FLAG_GENERALDELTA:
+        gdelta = True
+        flags.append(b'generaldelta')
+    if not flags:
+        flags = [b'(none)']
+
+    ### the total size of stored content if uncompressed.
+    full_text_total_size = 0
+    ### tracks merge vs single parent
+    nummerges = 0
+
+    ### tracks the ways the deltas are built
+    # nodelta
+    numempty = 0
+    numemptytext = 0
+    numemptydelta = 0
+    # full file content
+    numfull = 0
+    # intermediate snapshot against a prior snapshot
+    numsemi = 0
+    # snapshot count per depth
+    numsnapdepth = collections.defaultdict(lambda: 0)
+    # number of snapshots with a non-ancestor delta
+    numsnapdepth_nad = collections.defaultdict(lambda: 0)
+    # delta against previous revision
+    numprev = 0
+    # delta against prev, where prev is a non-ancestor
+    numprev_nad = 0
+    # delta against first or second parent (not prev)
+    nump1 = 0
+    nump2 = 0
+    # delta against neither prev nor parents
+    numother = 0
+    # delta against other that is a non-ancestor
+    numother_nad = 0
+    # delta against prev that are also first or second parent
+    # (details of `numprev`)
+    nump1prev = 0
+    nump2prev = 0
+
+    # data about delta chain of each revs
+    chainlengths = []
+    chainbases = []
+    chainspans = []
+
+    # data about each revision
+    datasize = [None, 0, 0]
+    fullsize = [None, 0, 0]
+    semisize = [None, 0, 0]
+    # snapshot count per depth
+    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
+    deltasize = [None, 0, 0]
+    chunktypecounts = {}
+    chunktypesizes = {}
+
+    def addsize(size, l):
+        if l[0] is None or size < l[0]:
+            l[0] = size
+        if size > l[1]:
+            l[1] = size
+        l[2] += size
+
+    numrevs = len(r)
+    for rev in range(numrevs):
+        p1, p2 = r.parentrevs(rev)
+        delta = r.deltaparent(rev)
+        if format > 0:
+            s = r.rawsize(rev)
+            full_text_total_size += s
+            addsize(s, datasize)
+        if p2 != nodemod.nullrev:
+            nummerges += 1
+        size = r.length(rev)
+        if delta == nodemod.nullrev:
+            chainlengths.append(0)
+            chainbases.append(r.start(rev))
+            chainspans.append(size)
+            if size == 0:
+                numempty += 1
+                numemptytext += 1
+            else:
+                numfull += 1
+                numsnapdepth[0] += 1
+                addsize(size, fullsize)
+                addsize(size, snapsizedepth[0])
+        else:
+            nad = (
+                delta != p1 and delta != p2 and not r.isancestorrev(delta, rev)
+            )
+            chainlengths.append(chainlengths[delta] + 1)
+            baseaddr = chainbases[delta]
+            revaddr = r.start(rev)
+            chainbases.append(baseaddr)
+            chainspans.append((revaddr - baseaddr) + size)
+            if size == 0:
+                numempty += 1
+                numemptydelta += 1
+            elif r.issnapshot(rev):
+                addsize(size, semisize)
+                numsemi += 1
+                depth = r.snapshotdepth(rev)
+                numsnapdepth[depth] += 1
+                if nad:
+                    numsnapdepth_nad[depth] += 1
+                addsize(size, snapsizedepth[depth])
+            else:
+                addsize(size, deltasize)
+                if delta == rev - 1:
+                    numprev += 1
+                    if delta == p1:
+                        nump1prev += 1
+                    elif delta == p2:
+                        nump2prev += 1
+                    elif nad:
+                        numprev_nad += 1
+                elif delta == p1:
+                    nump1 += 1
+                elif delta == p2:
+                    nump2 += 1
+                elif delta != nodemod.nullrev:
+                    numother += 1
+                    if nad:
+                        numother_nad += 1
+
+        # Obtain data on the raw chunks in the revlog.
+        if util.safehasattr(r, '_getsegmentforrevs'):
+            segment = r._getsegmentforrevs(rev, rev)[1]
+        else:
+            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
+        if segment:
+            chunktype = bytes(segment[0:1])
+        else:
+            chunktype = b'empty'
+
+        if chunktype not in chunktypecounts:
+            chunktypecounts[chunktype] = 0
+            chunktypesizes[chunktype] = 0
+
+        chunktypecounts[chunktype] += 1
+        chunktypesizes[chunktype] += size
+
+    # Adjust the min size value for the empty cases
+    for size in (datasize, fullsize, semisize, deltasize):
+        if size[0] is None:
+            size[0] = 0
+
+    numdeltas = numrevs - numfull - numempty - numsemi
+    numoprev = numprev - nump1prev - nump2prev - numprev_nad
+    num_other_ancestors = numother - numother_nad
+    totalrawsize = datasize[2]
+    datasize[2] /= numrevs
+    fulltotal = fullsize[2]
+    if numfull == 0:
+        fullsize[2] = 0
+    else:
+        fullsize[2] /= numfull
+    semitotal = semisize[2]
+    snaptotal = {}
+    if numsemi > 0:
+        semisize[2] /= numsemi
+    for depth in snapsizedepth:
+        snaptotal[depth] = snapsizedepth[depth][2]
+        snapsizedepth[depth][2] /= numsnapdepth[depth]
+
+    deltatotal = deltasize[2]
+    if numdeltas > 0:
+        deltasize[2] /= numdeltas
+    totalsize = fulltotal + semitotal + deltatotal
+    avgchainlen = sum(chainlengths) / numrevs
+    maxchainlen = max(chainlengths)
+    maxchainspan = max(chainspans)
+    compratio = 1
+    if totalsize:
+        compratio = totalrawsize / totalsize
+
+    basedfmtstr = b'%%%dd\n'
+    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
+
+    def dfmtstr(max):
+        return basedfmtstr % len(str(max))
+
+    def pcfmtstr(max, padding=0):
+        return basepcfmtstr % (len(str(max)), b' ' * padding)
+
+    def pcfmt(value, total):
+        if total:
+            return (value, 100 * float(value) / total)
+        else:
+            return value, 100.0
+
+    ui.writenoi18n(b'format : %d\n' % format)
+    ui.writenoi18n(b'flags  : %s\n' % b', '.join(flags))
+
+    ui.write(b'\n')
+    fmt = pcfmtstr(totalsize)
+    fmt2 = dfmtstr(totalsize)
+    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
+    ui.writenoi18n(b'    merges    : ' + fmt % pcfmt(nummerges, numrevs))
+    ui.writenoi18n(
+        b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
+    )
+    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
+    ui.writenoi18n(b'    empty     : ' + fmt % pcfmt(numempty, numrevs))
+    ui.writenoi18n(
+        b'                   text  : '
+        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
+    )
+    ui.writenoi18n(
+        b'                   delta : '
+        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
+    )
+    ui.writenoi18n(
+        b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs)
+    )
+    for depth in sorted(numsnapdepth):
+        base = b'      lvl-%-3d :       ' % depth
+        count = fmt % pcfmt(numsnapdepth[depth], numrevs)
+        pieces = [base, count]
+        if numsnapdepth_nad[depth]:
+            pieces[-1] = count = count[:-1]  # drop the final '\n'
+            more = b'  non-ancestor-bases: '
+            anc_count = fmt
+            anc_count %= pcfmt(numsnapdepth_nad[depth], numsnapdepth[depth])
+            pieces.append(more)
+            pieces.append(anc_count)
+        ui.write(b''.join(pieces))
+    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
+    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
+    ui.writenoi18n(
+        b'    snapshot  : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
+    )
+    for depth in sorted(numsnapdepth):
+        ui.write(
+            (b'      lvl-%-3d :       ' % depth)
+            + fmt % pcfmt(snaptotal[depth], totalsize)
+        )
+    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))
+
+    letters = string.ascii_letters.encode('ascii')
+
+    def fmtchunktype(chunktype):
+        if chunktype == b'empty':
+            return b'    %s     : ' % chunktype
+        elif chunktype in letters:
+            return b'    0x%s (%s)  : ' % (nodemod.hex(chunktype), chunktype)
+        else:
+            return b'    0x%s      : ' % nodemod.hex(chunktype)
+
+    ui.write(b'\n')
+    ui.writenoi18n(b'chunks        : ' + fmt2 % numrevs)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
+    ui.writenoi18n(b'chunks size   : ' + fmt2 % totalsize)
+    for chunktype in sorted(chunktypecounts):
+        ui.write(fmtchunktype(chunktype))
+        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
+
+    ui.write(b'\n')
+    b_total = b"%d" % full_text_total_size
+    p_total = []
+    while len(b_total) > 3:
+        p_total.append(b_total[-3:])
+        b_total = b_total[:-3]
+    p_total.append(b_total)
+    p_total.reverse()
+    b_total = b' '.join(p_total)
+
+    ui.write(b'\n')
+    ui.writenoi18n(b'total-stored-content: %s bytes\n' % b_total)
+    ui.write(b'\n')
+    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
+    ui.writenoi18n(b'avg chain length  : ' + fmt % avgchainlen)
+    ui.writenoi18n(b'max chain length  : ' + fmt % maxchainlen)
+    ui.writenoi18n(b'max chain reach   : ' + fmt % maxchainspan)
+    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
+
+    if format > 0:
+        ui.write(b'\n')
+        ui.writenoi18n(
+            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
+            % tuple(datasize)
+        )
+    ui.writenoi18n(
+        b'full revision size (min/max/avg)     : %d / %d / %d\n'
+        % tuple(fullsize)
+    )
+    ui.writenoi18n(
+        b'inter-snapshot size (min/max/avg)    : %d / %d / %d\n'
+        % tuple(semisize)
+    )
+    for depth in sorted(snapsizedepth):
+        if depth == 0:
+            continue
+        ui.writenoi18n(
+            b'    level-%-3d (min/max/avg)          : %d / %d / %d\n'
+            % ((depth,) + tuple(snapsizedepth[depth]))
+        )
+    ui.writenoi18n(
+        b'delta size (min/max/avg)             : %d / %d / %d\n'
+        % tuple(deltasize)
+    )
+
+    if numdeltas > 0:
+        ui.write(b'\n')
+        fmt = pcfmtstr(numdeltas)
+        fmt2 = pcfmtstr(numdeltas, 4)
+        ui.writenoi18n(
+            b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas)
+        )
+        if numprev > 0:
+            ui.writenoi18n(
+                b'    where prev = p1  : ' + fmt2 % pcfmt(nump1prev, numprev)
+            )
+            ui.writenoi18n(
+                b'    where prev = p2  : ' + fmt2 % pcfmt(nump2prev, numprev)
+            )
+            ui.writenoi18n(
+                b'    other-ancestor   : ' + fmt2 % pcfmt(numoprev, numprev)
+            )
+            ui.writenoi18n(
+                b'    unrelated        : ' + fmt2 % pcfmt(numprev_nad, numprev)
+            )
+        if gdelta:
+            ui.writenoi18n(
+                b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas)
+            )
+            ui.writenoi18n(
+                b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas)
+            )
+            ui.writenoi18n(
+                b'deltas against ancs  : '
+                + fmt % pcfmt(num_other_ancestors, numdeltas)
+            )
+            ui.writenoi18n(
+                b'deltas against other : '
+                + fmt % pcfmt(numother_nad, numdeltas)
+            )
+
+
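
The total-stored-content line emitted above groups digits with spaces by
repeatedly slicing three characters off the right of the byte string. A
minimal standalone sketch of that grouping (the sample value is
illustrative):

    b_total = b"%d" % 1234567
    parts = []
    while len(b_total) > 3:
        parts.append(b_total[-3:])  # peel off the last three digits
        b_total = b_total[:-3]
    parts.append(b_total)
    parts.reverse()
    print(b' '.join(parts))  # b'1 234 567'
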
+def debug_delta_find(ui, revlog, rev, base_rev=nodemod.nullrev):
+    """display the search process for a delta"""
+    deltacomputer = deltautil.deltacomputer(
+        revlog,
+        write_debug=ui.write,
+        debug_search=not ui.quiet,
+    )
+
+    node = revlog.node(rev)
+    p1r, p2r = revlog.parentrevs(rev)
+    p1 = revlog.node(p1r)
+    p2 = revlog.node(p2r)
+    full_text = revlog.revision(rev)
+    btext = [full_text]
+    textlen = len(btext[0])
+    cachedelta = None
+    flags = revlog.flags(rev)
+
+    if base_rev != nodemod.nullrev:
+        base_text = revlog.revision(base_rev)
+        delta = mdiff.textdiff(base_text, full_text)
+
+        cachedelta = (base_rev, delta, constants.DELTA_BASE_REUSE_TRY)
+        btext = [None]
+
+    revinfo = revlogutils.revisioninfo(
+        node,
+        p1,
+        p2,
+        btext,
+        textlen,
+        cachedelta,
+        flags,
+    )
+
+    fh = revlog._datafp()
+    deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
+
+
+def _get_revlogs(repo, changelog: bool, manifest: bool, filelogs: bool):
+    """yield revlogs from this repository"""
+    if changelog:
+        yield repo.changelog
+
+    if manifest:
+        # XXX: Handle tree manifest
+        root_mf = repo.manifestlog.getstorage(b'')
+        assert not root_mf._treeondisk
+        yield root_mf._revlog
+
+    if filelogs:
+        files = set()
+        for rev in repo:
+            ctx = repo[rev]
+            files |= set(ctx.files())
+
+        for f in sorted(files):
+            yield repo.file(f)._revlog
+
+
+def debug_revlog_stats(
+    repo, fm, changelog: bool, manifest: bool, filelogs: bool
+):
+    """Format revlog statistics for debugging purposes
+
+    fm: the output formatter.
+    """
+    fm.plain(b'rev-count   data-size inl type      target \n')
+
+    for rlog in _get_revlogs(repo, changelog, manifest, filelogs):
+        fm.startitem()
+        nb_rev = len(rlog)
+        inline = rlog._inline
+        data_size = rlog._get_data_offset(nb_rev - 1)
+
+        target = rlog.target
+        revlog_type = b'unknown'
+        revlog_target = b''
+        if target[0] == constants.KIND_CHANGELOG:
+            revlog_type = b'changelog'
+        elif target[0] == constants.KIND_MANIFESTLOG:
+            revlog_type = b'manifest'
+            revlog_target = target[1]
+        elif target[0] == constants.KIND_FILELOG:
+            revlog_type = b'file'
+            revlog_target = target[1]
+
+        fm.write(b'revlog.rev-count', b'%9d', nb_rev)
+        fm.write(b'revlog.data-size', b'%12d', data_size)
+
+        fm.write(b'revlog.inline', b' %-3s', b'yes' if inline else b'no')
+        fm.write(b'revlog.type', b' %-9s', revlog_type)
+        fm.write(b'revlog.target', b' %s', revlog_target)
+
+        fm.plain(b'\n')
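
For reference, the formatter above produces output shaped like the following
(rows and values are illustrative, not from a real repository):

    rev-count   data-size inl type      target 
            3         181 yes changelog 
            3         125 yes manifest  
            1          54 yes file      foo
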
--- a/mercurial/revlogutils/deltas.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/revlogutils/deltas.py	Thu Mar 02 22:45:44 2023 +0100
@@ -20,6 +20,8 @@
     COMP_MODE_DEFAULT,
     COMP_MODE_INLINE,
     COMP_MODE_PLAIN,
+    DELTA_BASE_REUSE_FORCE,
+    DELTA_BASE_REUSE_NO,
     KIND_CHANGELOG,
     KIND_FILELOG,
     KIND_MANIFESTLOG,
@@ -576,13 +578,20 @@
     )
 
 
-def isgooddeltainfo(revlog, deltainfo, revinfo):
+def is_good_delta_info(revlog, deltainfo, revinfo):
     """Returns True if the given delta is good. Good means that it is within
     the disk span, disk size, and chain length bounds that we know to be
     performant."""
     if deltainfo is None:
         return False
 
+    if (
+        revinfo.cachedelta is not None
+        and deltainfo.base == revinfo.cachedelta[0]
+        and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE
+    ):
+        return True
+
     # - 'deltainfo.distance' is the distance from the base revision --
     #   bounding it limits the amount of I/O we need to do.
     # - 'deltainfo.compresseddeltalen' is the sum of the total size of
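
The early return added above honors the reuse policy carried alongside an
incoming delta. A hedged sketch of building such a cachedelta triple,
assuming the (base rev, binary delta, reuse policy) layout used elsewhere in
this series (the base revision number is illustrative):

    from mercurial import mdiff
    from mercurial.revlogutils import constants

    base_text = b'line 1\nline 2\n'
    full_text = b'line 1\nline 2\nline 3\n'
    delta = mdiff.textdiff(base_text, full_text)  # binary delta: base -> full
    # the policy is one of DELTA_BASE_REUSE_NO / _TRY / _FORCE
    cachedelta = (42, delta, constants.DELTA_BASE_REUSE_TRY)
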
@@ -655,7 +664,16 @@
 LIMIT_BASE2TEXT = 500
 
 
-def _candidategroups(revlog, textlen, p1, p2, cachedelta):
+def _candidategroups(
+    revlog,
+    textlen,
+    p1,
+    p2,
+    cachedelta,
+    excluded_bases=None,
+    target_rev=None,
+    snapshot_cache=None,
+):
     """Provides group of revision to be tested as delta base
 
     This top level function focus on emitting groups with unique and worthwhile
@@ -666,15 +684,31 @@
         yield None
         return
 
+    if (
+        cachedelta is not None
+        and nullrev == cachedelta[0]
+        and cachedelta[2] == DELTA_BASE_REUSE_FORCE
+    ):
+        # instructions are to forcibly do a full snapshot
+        yield None
+        return
+
     deltalength = revlog.length
     deltaparent = revlog.deltaparent
     sparse = revlog._sparserevlog
     good = None
 
     deltas_limit = textlen * LIMIT_DELTA2TEXT
+    group_chunk_size = revlog._candidate_group_chunk_size
 
     tested = {nullrev}
-    candidates = _refinedgroups(revlog, p1, p2, cachedelta)
+    candidates = _refinedgroups(
+        revlog,
+        p1,
+        p2,
+        cachedelta,
+        snapshot_cache=snapshot_cache,
+    )
     while True:
         temptative = candidates.send(good)
         if temptative is None:
@@ -694,15 +728,37 @@
             # filter out revision we tested already
             if rev in tested:
                 continue
-            tested.add(rev)
+
+            if (
+                cachedelta is not None
+                and rev == cachedelta[0]
+                and cachedelta[2] == DELTA_BASE_REUSE_FORCE
+            ):
+                # instructions are to forcibly consider/use this delta base
+                group.append(rev)
+                continue
+
+            # a higher authority deemed the base unworthy (e.g. censored)
+            if excluded_bases is not None and rev in excluded_bases:
+                tested.add(rev)
+                continue
+            # We are in a recomputation case and that rev is too high in
+            # the revlog
+            if target_rev is not None and rev >= target_rev:
+                tested.add(rev)
+                continue
             # filter out delta base that will never produce good delta
             if deltas_limit < revlog.length(rev):
+                tested.add(rev)
                 continue
             if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
+                tested.add(rev)
                 continue
             # no delta for rawtext-changing revs (see "candelta" for why)
             if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
+                tested.add(rev)
                 continue
+
             # If we reach here, we are about to build and test a delta.
             # The delta building process will compute the chaininfo in all
             # case, since that computation is cached, it is fine to access it
@@ -710,9 +766,11 @@
             chainlen, chainsize = revlog._chaininfo(rev)
             # if chain will be too long, skip base
             if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
+                tested.add(rev)
                 continue
             # if chain already have too much data, skip base
             if deltas_limit < chainsize:
+                tested.add(rev)
                 continue
             if sparse and revlog.upperboundcomp is not None:
                 maxcomp = revlog.upperboundcomp
@@ -731,36 +789,46 @@
                     snapshotlimit = textlen >> snapshotdepth
                     if snapshotlimit < lowestrealisticdeltalen:
                         # delta lower bound is larger than accepted upper bound
+                        tested.add(rev)
                         continue
 
                     # check the relative constraint on the delta size
                     revlength = revlog.length(rev)
                     if revlength < lowestrealisticdeltalen:
                         # delta probable lower bound is larger than target base
+                        tested.add(rev)
                         continue
 
             group.append(rev)
         if group:
-            # XXX: in the sparse revlog case, group can become large,
-            #      impacting performances. Some bounding or slicing mecanism
-            #      would help to reduce this impact.
-            good = yield tuple(group)
+            # When the candidate group is large, testing it all can have a
+            # significant performance impact. To reduce this, we can send the
+            # candidates in smaller batches until a new batch does not provide
+            # any improvement.
+            #
+            # This might reduce the overall efficiency of the compression in
+            # some corner cases, but it should also prevent very pathological
+            # cases (e.g. 20,000 candidates) from being an issue.
+            #
+            # XXX note that the ordering of the group becomes important as it
+            # now impacts the final result. The current order is unprocessed
+            # and can be improved.
+            if group_chunk_size == 0:
+                tested.update(group)
+                good = yield tuple(group)
+            else:
+                prev_good = good
+                for start in range(0, len(group), group_chunk_size):
+                    sub_group = group[start : start + group_chunk_size]
+                    tested.update(sub_group)
+                    good = yield tuple(sub_group)
+                    if prev_good == good:
+                        break
+
     yield None
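
A minimal sketch of the chunked emission above, assuming a chunk size of 3;
the real loop additionally stops early once a batch no longer improves on the
previous best:

    group = [10, 11, 12, 13, 14]
    group_chunk_size = 3
    for start in range(0, len(group), group_chunk_size):
        sub_group = group[start : start + group_chunk_size]
        print(tuple(sub_group))  # (10, 11, 12) then (13, 14)
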
 
 
-def _findsnapshots(revlog, cache, start_rev):
-    """find snapshot from start_rev to tip"""
-    if util.safehasattr(revlog.index, b'findsnapshots'):
-        revlog.index.findsnapshots(cache, start_rev)
-    else:
-        deltaparent = revlog.deltaparent
-        issnapshot = revlog.issnapshot
-        for rev in revlog.revs(start_rev):
-            if issnapshot(rev):
-                cache[deltaparent(rev)].append(rev)
-
-
-def _refinedgroups(revlog, p1, p2, cachedelta):
+def _refinedgroups(revlog, p1, p2, cachedelta, snapshot_cache=None):
     good = None
     # First we try to reuse the delta contained in the bundle.
     # (or from the source revlog)
@@ -768,15 +836,28 @@
     # This logic only applies to general delta repositories and can be disabled
     # through configuration. Disabling reuse source delta is useful when
     # we want to make sure we recomputed "optimal" deltas.
-    if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
+    debug_info = None
+    if cachedelta is not None and cachedelta[2] > DELTA_BASE_REUSE_NO:
         # Assume what we received from the server is a good choice
         # build delta will reuse the cache
+        if debug_info is not None:
+            debug_info['cached-delta.tested'] += 1
         good = yield (cachedelta[0],)
         if good is not None:
+            if debug_info is not None:
+                debug_info['cached-delta.accepted'] += 1
             yield None
             return
-    snapshots = collections.defaultdict(list)
-    for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots):
+    if snapshot_cache is None:
+        snapshot_cache = SnapshotCache()
+    groups = _rawgroups(
+        revlog,
+        p1,
+        p2,
+        cachedelta,
+        snapshot_cache,
+    )
+    for candidates in groups:
         good = yield candidates
         if good is not None:
             break
@@ -797,19 +878,22 @@
                 break
             good = yield (base,)
         # refine snapshot up
-        if not snapshots:
-            _findsnapshots(revlog, snapshots, good + 1)
+        if not snapshot_cache.snapshots:
+            snapshot_cache.update(revlog, good + 1)
         previous = None
         while good != previous:
             previous = good
-            children = tuple(sorted(c for c in snapshots[good]))
+            children = tuple(sorted(c for c in snapshot_cache.snapshots[good]))
             good = yield children
 
-    # we have found nothing
+    if debug_info is not None:
+        if good is None:
+            debug_info['no-solution'] += 1
+
     yield None
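
_refinedgroups and _candidategroups talk to their caller through
generator.send(): the consumer reports the best base found in the last group,
and the generator uses that answer to decide what to try next. A
self-contained sketch of the protocol (pick_best is a hypothetical scoring
helper):

    def candidate_groups():
        good = None
        good = yield (1, 2)  # first group of candidate bases
        if good is None:
            good = yield (3,)  # refine with a further group
        yield None  # search exhausted

    def pick_best(candidates):  # hypothetical: score and pick a base
        return min(candidates)

    gen = candidate_groups()
    candidates = next(gen)
    while candidates is not None:
        candidates = gen.send(pick_best(candidates))
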
 
 
-def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
+def _rawgroups(revlog, p1, p2, cachedelta, snapshot_cache=None):
     """Provides group of revision to be tested as delta base
 
     This lower level function focus on emitting delta theorically interresting
@@ -840,9 +924,9 @@
             yield parents
 
     if sparse and parents:
-        if snapshots is None:
-            # map: base-rev: snapshot-rev
-            snapshots = collections.defaultdict(list)
+        if snapshot_cache is None:
+            # map: base-rev: [snapshot-revs]
+            snapshot_cache = SnapshotCache()
         # See if we can use an existing snapshot in the parent chains to use as
         # a base for a new intermediate-snapshot
         #
@@ -856,7 +940,7 @@
                     break
                 parents_snaps[idx].add(s)
         snapfloor = min(parents_snaps[0]) + 1
-        _findsnapshots(revlog, snapshots, snapfloor)
+        snapshot_cache.update(revlog, snapfloor)
         # search for the highest "unrelated" revision
         #
         # Adding snapshots used by "unrelated" revision increase the odd we
@@ -879,14 +963,14 @@
             # chain.
             max_depth = max(parents_snaps.keys())
             chain = deltachain(other)
-            for idx, s in enumerate(chain):
+            for depth, s in enumerate(chain):
                 if s < snapfloor:
                     continue
-                if max_depth < idx:
+                if max_depth < depth:
                     break
                 if not revlog.issnapshot(s):
                     break
-                parents_snaps[idx].add(s)
+                parents_snaps[depth].add(s)
         # Test them as possible intermediate snapshot bases
         # We test them from highest to lowest level. High level ones are more
         # likely to result in small deltas
@@ -894,7 +978,7 @@
         for idx, snaps in sorted(parents_snaps.items(), reverse=True):
             siblings = set()
             for s in snaps:
-                siblings.update(snapshots[s])
+                siblings.update(snapshot_cache.snapshots[s])
             # Before considering making a new intermediate snapshot, we check
             # if an existing snapshot, a child of a base we consider, would be
             # suitable.
@@ -922,7 +1006,8 @@
         # revisions instead of starting our own. Without such re-use,
         # topological branches would keep reopening new full chains, creating
         # more and more snapshots as the repository grows.
-        yield tuple(snapshots[nullrev])
+        full = [r for r in snapshot_cache.snapshots[nullrev] if snapfloor <= r]
+        yield tuple(sorted(full))
 
     if not sparse:
         # other approach failed try against prev to hopefully save us a
@@ -930,11 +1015,74 @@
         yield (prev,)
 
 
+class SnapshotCache:
+    __slots__ = ('snapshots', '_start_rev', '_end_rev')
+
+    def __init__(self):
+        self.snapshots = collections.defaultdict(set)
+        self._start_rev = None
+        self._end_rev = None
+
+    def update(self, revlog, start_rev=0):
+        """find snapshots from start_rev to tip"""
+        nb_revs = len(revlog)
+        end_rev = nb_revs - 1
+        if start_rev > end_rev:
+            return  # range is empty
+
+        if self._start_rev is None:
+            assert self._end_rev is None
+            self._update(revlog, start_rev, end_rev)
+        elif not (self._start_rev <= start_rev and end_rev <= self._end_rev):
+            if start_rev < self._start_rev:
+                self._update(revlog, start_rev, self._start_rev - 1)
+            if self._end_rev < end_rev:
+                self._update(revlog, self._end_rev + 1, end_rev)
+
+        if self._start_rev is None:
+            assert self._end_rev is None
+            self._end_rev = end_rev
+            self._start_rev = start_rev
+        else:
+            self._start_rev = min(self._start_rev, start_rev)
+            self._end_rev = max(self._end_rev, end_rev)
+        assert self._start_rev <= self._end_rev, (
+            self._start_rev,
+            self._end_rev,
+        )
+
+    def _update(self, revlog, start_rev, end_rev):
+        """internal method that actually do update content"""
+        assert self._start_rev is None or (
+            start_rev < self._start_rev or start_rev > self._end_rev
+        ), (self._start_rev, self._end_rev, start_rev, end_rev)
+        assert self._start_rev is None or (
+            end_rev < self._start_rev or end_rev > self._end_rev
+        ), (self._start_rev, self._end_rev, start_rev, end_rev)
+        cache = self.snapshots
+        if util.safehasattr(revlog.index, b'findsnapshots'):
+            revlog.index.findsnapshots(cache, start_rev, end_rev)
+        else:
+            deltaparent = revlog.deltaparent
+            issnapshot = revlog.issnapshot
+            for rev in revlog.revs(start_rev, end_rev):
+                if issnapshot(rev):
+                    cache[deltaparent(rev)].add(rev)
+
+
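
A hedged usage sketch of SnapshotCache; revlog stands for any revlog object
providing the index/revs() API relied on above, and nullrev comes from
mercurial.node:

    cache = SnapshotCache()
    cache.update(revlog, start_rev=0)    # scan the whole revlog once
    cache.update(revlog, start_rev=100)  # no-op: range already covered
    full_snapshots = cache.snapshots[nullrev]  # full-snapshot revisions
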
 class deltacomputer:
-    def __init__(self, revlog, write_debug=None, debug_search=False):
+    def __init__(
+        self,
+        revlog,
+        write_debug=None,
+        debug_search=False,
+        debug_info=None,
+    ):
         self.revlog = revlog
         self._write_debug = write_debug
         self._debug_search = debug_search
+        self._debug_info = debug_info
+        self._snapshot_cache = SnapshotCache()
 
     def buildtext(self, revinfo, fh):
         """Builds a fulltext version of a revision
@@ -998,7 +1146,7 @@
                 snapshotdepth = len(revlog._deltachain(deltabase)[0])
         delta = None
         if revinfo.cachedelta:
-            cachebase, cachediff = revinfo.cachedelta
+            cachebase = revinfo.cachedelta[0]
             # check if the diff still apply
             currentbase = cachebase
             while (
@@ -1103,11 +1251,14 @@
         if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
             return self._fullsnapshotinfo(fh, revinfo, target_rev)
 
-        if self._write_debug is not None:
+        gather_debug = (
+            self._write_debug is not None or self._debug_info is not None
+        )
+        debug_search = self._write_debug is not None and self._debug_search
+
+        if gather_debug:
             start = util.timer()
 
-        debug_search = self._write_debug is not None and self._debug_search
-
         # count the number of different delta we tried (for debug purpose)
         dbg_try_count = 0
         # count the number of "search round" we did. (for debug purpose)
@@ -1122,7 +1273,7 @@
         deltainfo = None
         p1r, p2r = revlog.rev(p1), revlog.rev(p2)
 
-        if self._write_debug is not None:
+        if gather_debug:
             if p1r != nullrev:
                 p1_chain_len = revlog._chaininfo(p1r)[0]
             else:
@@ -1137,7 +1288,14 @@
             self._write_debug(msg)
 
         groups = _candidategroups(
-            self.revlog, revinfo.textlen, p1r, p2r, cachedelta
+            self.revlog,
+            revinfo.textlen,
+            p1r,
+            p2r,
+            cachedelta,
+            excluded_bases,
+            target_rev,
+            snapshot_cache=self._snapshot_cache,
         )
         candidaterevs = next(groups)
         while candidaterevs is not None:
@@ -1147,7 +1305,13 @@
                 if deltainfo is not None:
                     prev = deltainfo.base
 
-                if p1 in candidaterevs or p2 in candidaterevs:
+                if (
+                    cachedelta is not None
+                    and len(candidaterevs) == 1
+                    and cachedelta[0] in candidaterevs
+                ):
+                    round_type = b"cached-delta"
+                elif p1 in candidaterevs or p2 in candidaterevs:
                     round_type = b"parents"
                 elif prev is not None and all(c < prev for c in candidaterevs):
                     round_type = b"refine-down"
@@ -1195,16 +1359,7 @@
                     msg = b"DBG-DELTAS-SEARCH:     base=%d\n"
                     msg %= self.revlog.deltaparent(candidaterev)
                     self._write_debug(msg)
-                if candidaterev in excluded_bases:
-                    if debug_search:
-                        msg = b"DBG-DELTAS-SEARCH:     EXCLUDED\n"
-                        self._write_debug(msg)
-                    continue
-                if candidaterev >= target_rev:
-                    if debug_search:
-                        msg = b"DBG-DELTAS-SEARCH:     TOO-HIGH\n"
-                        self._write_debug(msg)
-                    continue
+
                 dbg_try_count += 1
 
                 if debug_search:
@@ -1216,7 +1371,7 @@
                     msg %= delta_end - delta_start
                     self._write_debug(msg)
                 if candidatedelta is not None:
-                    if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
+                    if is_good_delta_info(self.revlog, candidatedelta, revinfo):
                         if debug_search:
                             msg = b"DBG-DELTAS-SEARCH:     DELTA: length=%d (GOOD)\n"
                             msg %= candidatedelta.deltalen
@@ -1244,12 +1399,28 @@
         else:
             dbg_type = b"delta"
 
-        if self._write_debug is not None:
+        if gather_debug:
             end = util.timer()
+            if dbg_type == b'full':
+                used_cached = (
+                    cachedelta is not None
+                    and dbg_try_rounds == 0
+                    and dbg_try_count == 0
+                    and cachedelta[0] == nullrev
+                )
+            else:
+                used_cached = (
+                    cachedelta is not None
+                    and dbg_try_rounds == 1
+                    and dbg_try_count == 1
+                    and deltainfo.base == cachedelta[0]
+                )
             dbg = {
                 'duration': end - start,
                 'revision': target_rev,
+                'delta-base': deltainfo.base,  # pytype: disable=attribute-error
                 'search_round_count': dbg_try_rounds,
+                'using-cached-base': used_cached,
                 'delta_try_count': dbg_try_count,
                 'type': dbg_type,
                 'p1-chain-len': p1_chain_len,
@@ -1279,31 +1450,39 @@
                     target_revlog += b'%s:' % target_key
             dbg['target-revlog'] = target_revlog
 
-            msg = (
-                b"DBG-DELTAS:"
-                b" %-12s"
-                b" rev=%d:"
-                b" search-rounds=%d"
-                b" try-count=%d"
-                b" - delta-type=%-6s"
-                b" snap-depth=%d"
-                b" - p1-chain-length=%d"
-                b" p2-chain-length=%d"
-                b" - duration=%f"
-                b"\n"
-            )
-            msg %= (
-                dbg["target-revlog"],
-                dbg["revision"],
-                dbg["search_round_count"],
-                dbg["delta_try_count"],
-                dbg["type"],
-                dbg["snapshot-depth"],
-                dbg["p1-chain-len"],
-                dbg["p2-chain-len"],
-                dbg["duration"],
-            )
-            self._write_debug(msg)
+            if self._debug_info is not None:
+                self._debug_info.append(dbg)
+
+            if self._write_debug is not None:
+                msg = (
+                    b"DBG-DELTAS:"
+                    b" %-12s"
+                    b" rev=%d:"
+                    b" delta-base=%d"
+                    b" is-cached=%d"
+                    b" - search-rounds=%d"
+                    b" try-count=%d"
+                    b" - delta-type=%-6s"
+                    b" snap-depth=%d"
+                    b" - p1-chain-length=%d"
+                    b" p2-chain-length=%d"
+                    b" - duration=%f"
+                    b"\n"
+                )
+                msg %= (
+                    dbg["target-revlog"],
+                    dbg["revision"],
+                    dbg["delta-base"],
+                    dbg["using-cached-base"],
+                    dbg["search_round_count"],
+                    dbg["delta_try_count"],
+                    dbg["type"],
+                    dbg["snapshot-depth"],
+                    dbg["p1-chain-len"],
+                    dbg["p2-chain-len"],
+                    dbg["duration"],
+                )
+                self._write_debug(msg)
         return deltainfo
 
 
--- a/mercurial/revlogutils/docket.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/revlogutils/docket.py	Thu Mar 02 22:45:44 2023 +0100
@@ -90,7 +90,7 @@
 # * 8 bytes: pending size of data
 # * 8 bytes: pending size of sidedata
 # * 1 bytes: default compression header
-S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc')
+S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBQQQQQQc')
 # * 1 bytes: size of index uuid
 # * 8 bytes: size of file
 S_OLD_UID = struct.Struct('>BL')
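
Switching the six size fields from 'L' to 'Q' widens them from 32-bit to
64-bit unsigned integers, matching the "8 bytes" comments above and allowing
files larger than 4 GiB to be recorded. A quick check of the standard-size
widths:

    import struct

    assert struct.calcsize('>L') == 4  # 'L' is 4 bytes in standard-size mode
    assert struct.calcsize('>Q') == 8  # 'Q' is 8 bytes
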
--- a/mercurial/revset.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/revset.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1868,13 +1868,12 @@
         dests = []
     missing = set()
     for path in urlutil.get_push_paths(repo, repo.ui, dests):
-        dest = path.pushloc or path.loc
         branches = path.branch, []
 
         revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
         if revs:
             revs = [repo.lookup(rev) for rev in revs]
-        other = hg.peer(repo, {}, dest)
+        other = hg.peer(repo, {}, path)
         try:
             with repo.ui.silent():
                 outgoing = discovery.findcommonoutgoing(
@@ -2130,11 +2129,9 @@
         dest = getstring(l[1], _(b"remote requires a repository path"))
     if not dest:
         dest = b'default'
-    dest, branches = urlutil.get_unique_pull_path(
-        b'remote', repo, repo.ui, dest
-    )
-
-    other = hg.peer(repo, {}, dest)
+    path = urlutil.get_unique_pull_path_obj(b'remote', repo.ui, dest)
+
+    other = hg.peer(repo, {}, path)
     n = other.lookup(q)
     if n in repo:
         r = repo[n].rev()
--- a/mercurial/scmposix.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/scmposix.py	Thu Mar 02 22:45:44 2023 +0100
@@ -4,6 +4,11 @@
 import os
 import sys
 
+from typing import (
+    List,
+    Tuple,
+)
+
 from .pycompat import getattr
 from . import (
     encoding,
@@ -11,6 +16,9 @@
     util,
 )
 
+if pycompat.TYPE_CHECKING:
+    from . import ui as uimod
+
 # BSD 'more' escapes ANSI color sequences by default. This can be disabled by
 # $MORE variable, but there's no compatible option with Linux 'more'. Given
 # OS X is widely used and most modern Unix systems would have 'less', setting
@@ -18,7 +26,7 @@
 fallbackpager = b'less'
 
 
-def _rcfiles(path):
+def _rcfiles(path: bytes) -> List[bytes]:
     rcs = [os.path.join(path, b'hgrc')]
     rcdir = os.path.join(path, b'hgrc.d')
     try:
@@ -34,7 +42,7 @@
     return rcs
 
 
-def systemrcpath():
+def systemrcpath() -> List[bytes]:
     path = []
     if pycompat.sysplatform == b'plan9':
         root = b'lib/mercurial'
@@ -49,7 +57,7 @@
     return path
 
 
-def userrcpath():
+def userrcpath() -> List[bytes]:
     if pycompat.sysplatform == b'plan9':
         return [encoding.environ[b'home'] + b'/lib/hgrc']
     elif pycompat.isdarwin:
@@ -65,7 +73,7 @@
         ]
 
 
-def termsize(ui):
+def termsize(ui: "uimod.ui") -> Tuple[int, int]:
     try:
         import termios
 
@@ -88,7 +96,7 @@
         except ValueError:
             pass
         except IOError as e:
-            if e[0] == errno.EINVAL:  # pytype: disable=unsupported-operands
+            if e.errno == errno.EINVAL:
                 pass
             else:
                 raise
--- a/mercurial/scmutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/scmutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1219,7 +1219,7 @@
                 )
 
 
-def addremove(repo, matcher, prefix, uipathfn, opts=None):
+def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
     if opts is None:
         opts = {}
     m = matcher
@@ -1279,7 +1279,9 @@
         repo, m, added + unknown, removed + deleted, similarity, uipathfn
     )
 
-    if not dry_run:
+    if not dry_run and (unknown or forgotten or deleted or renames):
+        if open_tr is not None:
+            open_tr()
         _markchanges(repo, unknown + forgotten, deleted, renames)
 
     for f in rejected:
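
The new open_tr callback lets a caller delay opening a transaction until
addremove actually has changes to record. A hedged sketch of how a caller
might wire it up (the transaction name is an assumption for illustration):

    def run_addremove(repo, matcher, prefix, uipathfn, opts):
        tr = None

        def open_tr():
            nonlocal tr
            if tr is None:  # opened lazily, only once changes are known
                tr = repo.transaction(b'addremove')  # hypothetical name

        scmutil.addremove(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
        return tr
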
@@ -1863,7 +1865,12 @@
 
 
 def gddeltaconfig(ui):
-    """helper function to know if incoming delta should be optimised"""
+    """helper function to know if incoming deltas should be optimized
+
+    The `format.generaldelta` config is an old form of the config that also
+    implies that incoming delta-bases should never be trusted. This function
+    exists for this purpose.
+    """
     # experimental config: format.generaldelta
     return ui.configbool(b'format', b'generaldelta')
 
--- a/mercurial/scmwindows.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/scmwindows.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,4 +1,10 @@
 import os
+import winreg  # pytype: disable=import-error
+
+from typing import (
+    List,
+    Tuple,
+)
 
 from . import (
     encoding,
@@ -7,19 +13,14 @@
     win32,
 )
 
-try:
-    import _winreg as winreg  # pytype: disable=import-error
-
-    winreg.CloseKey
-except ImportError:
-    # py2 only
-    import winreg  # pytype: disable=import-error
+if pycompat.TYPE_CHECKING:
+    from . import ui as uimod
 
 # MS-DOS 'more' is the only pager available by default on Windows.
 fallbackpager = b'more'
 
 
-def systemrcpath():
+def systemrcpath() -> List[bytes]:
     '''return default os-specific hgrc search path'''
     rcpath = []
     filename = win32.executablepath()
@@ -27,7 +28,7 @@
     progrc = os.path.join(os.path.dirname(filename), b'mercurial.ini')
     rcpath.append(progrc)
 
-    def _processdir(progrcd):
+    def _processdir(progrcd: bytes) -> None:
         if os.path.isdir(progrcd):
             for f, kind in sorted(util.listdir(progrcd)):
                 if f.endswith(b'.rc'):
@@ -68,7 +69,7 @@
     return rcpath
 
 
-def userrcpath():
+def userrcpath() -> List[bytes]:
     '''return os-specific hgrc search path to the user dir'''
     home = _legacy_expanduser(b'~')
     path = [os.path.join(home, b'mercurial.ini'), os.path.join(home, b'.hgrc')]
@@ -79,7 +80,7 @@
     return path
 
 
-def _legacy_expanduser(path):
+def _legacy_expanduser(path: bytes) -> bytes:
     """Expand ~ and ~user constructs in the pre 3.8 style"""
 
     # Python 3.8+ changed the expansion of '~' from HOME to USERPROFILE.  See
@@ -111,5 +112,5 @@
     return userhome + path[i:]
 
 
-def termsize(ui):
+def termsize(ui: "uimod.ui") -> Tuple[int, int]:
     return win32.termsize()
--- a/mercurial/shelve.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/shelve.py	Thu Mar 02 22:45:44 2023 +0100
@@ -247,6 +247,14 @@
         for ext in shelvefileextensions:
             self.vfs.tryunlink(self.name + b'.' + ext)
 
+    def changed_files(self, ui, repo):
+        try:
+            ctx = repo.unfiltered()[self.readinfo()[b'node']]
+            return ctx.files()
+        except (FileNotFoundError, error.RepoLookupError):
+            filename = self.vfs.join(self.name + b'.patch')
+            return patch.changedfiles(ui, repo, filename)
+
 
 def _optimized_match(repo, node):
     """
@@ -424,10 +432,28 @@
 
 def _aborttransaction(repo, tr):
     """Abort current transaction for shelve/unshelve, but keep dirstate"""
-    dirstatebackupname = b'dirstate.shelve'
-    repo.dirstate.savebackup(None, dirstatebackupname)
-    tr.abort()
-    repo.dirstate.restorebackup(None, dirstatebackupname)
+    # disable the transaction invalidation of the dirstate, to preserve the
+    # current change in memory.
+    ds = repo.dirstate
+    # The assert below checks that nobody else did such wrapping.
+    #
+    # There is no such other wrapping currently, but if someone tries to
+    # implement one in the future, this will explicitly break here instead of
+    # misbehaving in subtle ways.
+    current_branch = ds.branch()
+    assert 'invalidate' not in vars(ds)
+    try:
+        # note : we could simply disable the transaction abort callback, but
+        # other code also tries to rollback and invalidate this.
+        ds.invalidate = lambda: None
+        tr.abort()
+    finally:
+        del ds.invalidate
+    # manually write the in-memory changes since we can no longer rely on
+    # the transaction to do so.
+    assert repo.currenttransaction() is None
+    repo.dirstate.write(None)
+    ds.setbranch(current_branch, None)
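
The wrapping above relies on Python's attribute lookup order: an instance
attribute shadows the method defined on the class, and deleting it makes the
class method visible again. A standalone sketch:

    class Dirstate:
        def invalidate(self):
            print('invalidated')

    ds = Dirstate()
    assert 'invalidate' not in vars(ds)  # nothing shadows the method yet
    ds.invalidate = lambda: None  # instance attribute wins over the class
    ds.invalidate()  # no-op
    del ds.invalidate
    ds.invalidate()  # prints 'invalidated' again
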
 
 
 def getshelvename(repo, parent, opts):
@@ -599,12 +625,15 @@
         activebookmark = _backupactivebookmark(repo)
         extra = {b'internal': b'shelve'}
         if includeunknown:
-            _includeunknownfiles(repo, pats, opts, extra)
+            with repo.dirstate.changing_files(repo):
+                _includeunknownfiles(repo, pats, opts, extra)
 
         if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
             # In non-bare shelve we don't store newly created branch
             # at bundled commit
-            repo.dirstate.setbranch(repo[b'.'].branch())
+            repo.dirstate.setbranch(
+                repo[b'.'].branch(), repo.currenttransaction()
+            )
 
         commitfunc = getcommitfunc(extra, interactive, editor=True)
         if not interactive:
@@ -629,7 +658,7 @@
 
         ui.status(_(b'shelved as %s\n') % name)
         if opts[b'keep']:
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 scmutil.movedirstate(repo, parent, match)
         else:
             hg.update(repo, parent.node())
@@ -638,7 +667,7 @@
                 ms.reset()
 
         if origbranch != repo[b'.'].branch() and not _isbareshelve(pats, opts):
-            repo.dirstate.setbranch(origbranch)
+            repo.dirstate.setbranch(origbranch, repo.currenttransaction())
 
         _finishshelve(repo, tr)
     finally:
@@ -822,7 +851,7 @@
 
 def restorebranch(ui, repo, branchtorestore):
     if branchtorestore and branchtorestore != repo.dirstate.branch():
-        repo.dirstate.setbranch(branchtorestore)
+        repo.dirstate.setbranch(branchtorestore, repo.currenttransaction())
         ui.status(
             _(b'marked working directory as branch %s\n') % branchtorestore
         )
@@ -854,18 +883,18 @@
         shelvectx = repo[state.parents[1]]
         pendingctx = state.pendingctx
 
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             repo.setparents(state.pendingctx.node(), repo.nullid)
             repo.dirstate.write(repo.currenttransaction())
 
         targetphase = _target_phase(repo)
         overrides = {(b'phases', b'new-commit'): targetphase}
         with repo.ui.configoverride(overrides, b'unshelve'):
-            with repo.dirstate.parentchange():
+            with repo.dirstate.changing_parents(repo):
                 repo.setparents(state.parents[0], repo.nullid)
-                newnode, ispartialunshelve = _createunshelvectx(
-                    ui, repo, shelvectx, basename, interactive, opts
-                )
+            newnode, ispartialunshelve = _createunshelvectx(
+                ui, repo, shelvectx, basename, interactive, opts
+            )
 
         if newnode is None:
             shelvectx = state.pendingctx
@@ -1060,11 +1089,11 @@
             )
             raise error.ConflictResolutionRequired(b'unshelve')
 
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             repo.setparents(tmpwctx.node(), repo.nullid)
-            newnode, ispartialunshelve = _createunshelvectx(
-                ui, repo, shelvectx, basename, interactive, opts
-            )
+        newnode, ispartialunshelve = _createunshelvectx(
+            ui, repo, shelvectx, basename, interactive, opts
+        )
 
         if newnode is None:
             shelvectx = tmpwctx
@@ -1210,7 +1239,8 @@
         restorebranch(ui, repo, branchtorestore)
         shelvedstate.clear(repo)
         _finishunshelve(repo, oldtiprev, tr, activebookmark)
-        _forgetunknownfiles(repo, shelvectx, addedbefore)
+        with repo.dirstate.changing_files(repo):
+            _forgetunknownfiles(repo, shelvectx, addedbefore)
         if not ispartialunshelve:
             unshelvecleanup(ui, repo, basename, opts)
     finally:
--- a/mercurial/simplemerge.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/simplemerge.py	Thu Mar 02 22:45:44 2023 +0100
@@ -512,6 +512,8 @@
     conflicts = False
     if mode == b'union':
         lines = _resolve(m3, (1, 2))
+    elif mode == b'union-other-first':
+        lines = _resolve(m3, (2, 1))
     elif mode == b'local':
         lines = _resolve(m3, (1,))
     elif mode == b'other':
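
The tuple passed to _resolve gives the order in which the two sides of a
conflicting region are emitted. Assuming the local side contributes line A
and the other side contributes line B, the two union modes differ only in
that order:

    union             -> A then B
    union-other-first -> B then A
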
--- a/mercurial/sparse.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/sparse.py	Thu Mar 02 22:45:44 2023 +0100
@@ -451,7 +451,7 @@
                     message,
                 )
 
-        with repo.dirstate.parentchange():
+        with repo.dirstate.changing_parents(repo):
             mergemod.applyupdates(
                 repo,
                 tmresult,
@@ -655,7 +655,7 @@
     The remaining sparse config only has profiles, if defined. The working
     directory is refreshed, as needed.
     """
-    with repo.wlock(), repo.dirstate.parentchange():
+    with repo.wlock(), repo.dirstate.changing_parents(repo):
         raw = repo.vfs.tryread(b'sparse')
         includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
 
@@ -671,7 +671,7 @@
     The updated sparse config is written out and the working directory
     is refreshed, as needed.
     """
-    with repo.wlock(), repo.dirstate.parentchange():
+    with repo.wlock(), repo.dirstate.changing_parents(repo):
         # read current configuration
         raw = repo.vfs.tryread(b'sparse')
         includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
@@ -730,7 +730,7 @@
 
     The new config is written out and a working directory refresh is performed.
     """
-    with repo.wlock(), repo.lock(), repo.dirstate.parentchange():
+    with repo.wlock(), repo.lock(), repo.dirstate.changing_parents(repo):
         raw = repo.vfs.tryread(b'sparse')
         oldinclude, oldexclude, oldprofiles = parseconfig(
             repo.ui, raw, b'sparse'
--- a/mercurial/sshpeer.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/sshpeer.py	Thu Mar 02 22:45:44 2023 +0100
@@ -372,7 +372,7 @@
 
 class sshv1peer(wireprotov1peer.wirepeer):
     def __init__(
-        self, ui, url, proc, stdin, stdout, stderr, caps, autoreadstderr=True
+        self, ui, path, proc, stdin, stdout, stderr, caps, autoreadstderr=True
     ):
         """Create a peer from an existing SSH connection.
 
@@ -383,8 +383,7 @@
         ``autoreadstderr`` denotes whether to automatically read from
         stderr and to forward its output.
         """
-        self._url = url
-        self.ui = ui
+        super().__init__(ui, path=path)
         # self._subprocess is unused. Keeping a handle on the process
         # holds a reference and prevents it from being garbage collected.
         self._subprocess = proc
@@ -411,14 +410,11 @@
     # Begin of ipeerconnection interface.
 
     def url(self):
-        return self._url
+        return self.path.loc
 
     def local(self):
         return None
 
-    def peer(self):
-        return self
-
     def canpush(self):
         return True
 
@@ -610,16 +606,16 @@
         )
 
 
-def instance(ui, path, create, intents=None, createopts=None):
+def make_peer(ui, path, create, intents=None, createopts=None):
     """Create an SSH peer.
 
     The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
     """
-    u = urlutil.url(path, parsequery=False, parsefragment=False)
+    u = urlutil.url(path.loc, parsequery=False, parsefragment=False)
     if u.scheme != b'ssh' or not u.host or u.path is None:
         raise error.RepoError(_(b"couldn't parse location %s") % path)
 
-    urlutil.checksafessh(path)
+    urlutil.checksafessh(path.loc)
 
     if u.passwd is not None:
         raise error.RepoError(_(b'password in URL not supported'))
--- a/mercurial/statichttprepo.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/statichttprepo.py	Thu Mar 02 22:45:44 2023 +0100
@@ -225,6 +225,7 @@
         self.encodepats = None
         self.decodepats = None
         self._transref = None
+        self._dirstate = None
 
     def _restrictcapabilities(self, caps):
         caps = super(statichttprepository, self)._restrictcapabilities(caps)
@@ -236,8 +237,8 @@
     def local(self):
         return False
 
-    def peer(self):
-        return statichttppeer(self)
+    def peer(self, path=None):
+        return statichttppeer(self, path=path)
 
     def wlock(self, wait=True):
         raise error.LockUnavailable(
@@ -259,7 +260,8 @@
         pass  # statichttprepository are read only
 
 
-def instance(ui, path, create, intents=None, createopts=None):
+def make_peer(ui, path, create, intents=None, createopts=None):
     if create:
         raise error.Abort(_(b'cannot create new static-http repository'))
-    return statichttprepository(ui, path[7:])
+    url = path.loc[7:]
+    return statichttprepository(ui, url).peer(path=path)
--- a/mercurial/statprof.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/statprof.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1049,7 +1049,7 @@
     # process options
     try:
         opts, args = pycompat.getoptb(
-            sys.argv[optstart:],
+            pycompat.sysargv[optstart:],
             b"hl:f:o:p:",
             [b"help", b"limit=", b"file=", b"output-file=", b"script-path="],
         )
--- a/mercurial/strip.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/strip.py	Thu Mar 02 22:45:44 2023 +0100
@@ -241,31 +241,32 @@
 
         revs = sorted(rootnodes)
         if update and opts.get(b'keep'):
-            urev = _findupdatetarget(repo, revs)
-            uctx = repo[urev]
+            with repo.dirstate.changing_parents(repo):
+                urev = _findupdatetarget(repo, revs)
+                uctx = repo[urev]
 
-            # only reset the dirstate for files that would actually change
-            # between the working context and uctx
-            descendantrevs = repo.revs(b"only(., %d)", uctx.rev())
-            changedfiles = []
-            for rev in descendantrevs:
-                # blindly reset the files, regardless of what actually changed
-                changedfiles.extend(repo[rev].files())
+                # only reset the dirstate for files that would actually change
+                # between the working context and uctx
+                descendantrevs = repo.revs(b"only(., %d)", uctx.rev())
+                changedfiles = []
+                for rev in descendantrevs:
+                    # blindly reset the files, regardless of what actually changed
+                    changedfiles.extend(repo[rev].files())
 
-            # reset files that only changed in the dirstate too
-            dirstate = repo.dirstate
-            dirchanges = [
-                f for f in dirstate if not dirstate.get_entry(f).maybe_clean
-            ]
-            changedfiles.extend(dirchanges)
+                # reset files that only changed in the dirstate too
+                dirstate = repo.dirstate
+                dirchanges = [
+                    f for f in dirstate if not dirstate.get_entry(f).maybe_clean
+                ]
+                changedfiles.extend(dirchanges)
 
-            repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
-            repo.dirstate.write(repo.currenttransaction())
+                repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
+                repo.dirstate.write(repo.currenttransaction())
 
-            # clear resolve state
-            mergestatemod.mergestate.clean(repo)
+                # clear resolve state
+                mergestatemod.mergestate.clean(repo)
 
-            update = False
+                update = False
 
         strip(
             ui,
--- a/mercurial/subrepo.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/subrepo.py	Thu Mar 02 22:45:44 2023 +0100
@@ -569,9 +569,20 @@
 
     @annotatesubrepoerror
     def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
-        return cmdutil.add(
-            ui, self._repo, match, prefix, uipathfn, explicitonly, **opts
-        )
+        # XXX Ideally, we could let the caller take the `changing_files`
+        # context.  However this is not an abstraction that makes sense for
+        # other repository types, and leaking details purely related to the
+        # dirstate seems unfortunate. So for now the context will be used here.
+        with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
+            return cmdutil.add(
+                ui,
+                self._repo,
+                match,
+                prefix,
+                uipathfn,
+                explicitonly,
+                **opts,
+            )
 
     @annotatesubrepoerror
     def addremove(self, m, prefix, uipathfn, opts):
@@ -580,7 +591,18 @@
         # be used to process sibling subrepos however.
         opts = copy.copy(opts)
         opts[b'subrepos'] = True
-        return scmutil.addremove(self._repo, m, prefix, uipathfn, opts)
+        # XXX Ideally, we could let the caller take the `changing_files`
+        # context.  However this is not an abstraction that makes sense for
+        # other repository types, and leaking details purely related to the
+        # dirstate seems unfortunate. So for now the context will be used here.
+        with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
+            return scmutil.addremove(
+                self._repo,
+                m,
+                prefix,
+                uipathfn,
+                opts,
+            )
 
     @annotatesubrepoerror
     def cat(self, match, fm, fntemplate, prefix, **opts):
@@ -621,7 +643,7 @@
                 match,
                 prefix=prefix,
                 listsubrepos=True,
-                **opts
+                **opts,
             )
         except error.RepoLookupError as inst:
             self.ui.warn(
@@ -946,16 +968,21 @@
 
     @annotatesubrepoerror
     def forget(self, match, prefix, uipathfn, dryrun, interactive):
-        return cmdutil.forget(
-            self.ui,
-            self._repo,
-            match,
-            prefix,
-            uipathfn,
-            True,
-            dryrun=dryrun,
-            interactive=interactive,
-        )
+        # XXX Ideally, we could let the caller take the `changing_files`
+        # context.  However this is not an abstraction that makes sense for
+        # other repository types, and leaking details purely related to the
+        # dirstate seems unfortunate. So for now the context will be used here.
+        with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
+            return cmdutil.forget(
+                self.ui,
+                self._repo,
+                match,
+                prefix,
+                uipathfn,
+                True,
+                dryrun=dryrun,
+                interactive=interactive,
+            )
 
     @annotatesubrepoerror
     def removefiles(
@@ -969,17 +996,22 @@
         dryrun,
         warnings,
     ):
-        return cmdutil.remove(
-            self.ui,
-            self._repo,
-            matcher,
-            prefix,
-            uipathfn,
-            after,
-            force,
-            subrepos,
-            dryrun,
-        )
+        # XXX Ideally, we could let the caller take the `changing_files`
+        # context.  However this is not an abstraction that makes sense for
+        # other repository types, and leaking details purely related to the
+        # dirstate seems unfortunate. So for now the context will be used here.
+        with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
+            return cmdutil.remove(
+                self.ui,
+                self._repo,
+                matcher,
+                prefix,
+                uipathfn,
+                after,
+                force,
+                subrepos,
+                dryrun,
+            )
 
     @annotatesubrepoerror
     def revert(self, substate, *pats, **opts):
@@ -1009,7 +1041,12 @@
             pats = [b'set:modified()']
         else:
             pats = []
-        cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts)
+        # XXX Ideally, we could let the caller take the `changing_files`
+        # context.  However, this is not an abstraction that makes sense for
+        # other repository types, and leaking details purely related to the
+        # dirstate seems unfortunate. So for now the context is taken here.
+        with self._repo.wlock(), self._repo.dirstate.changing_files(self._repo):
+            cmdutil.revert(self.ui, self._repo, ctx, *pats, **opts)
 
     def shortid(self, revid):
         return revid[:12]
@@ -1123,7 +1160,7 @@
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
             env=procutil.tonativeenv(env),
-            **extrakw
+            **extrakw,
         )
         stdout, stderr = map(util.fromnativeeol, p.communicate())
         stderr = stderr.strip()
@@ -1488,7 +1525,7 @@
             close_fds=procutil.closefds,
             stdout=subprocess.PIPE,
             stderr=errpipe,
-            **extrakw
+            **extrakw,
         )
         if stream:
             return p.stdout, None
--- a/mercurial/tags.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/tags.py	Thu Mar 02 22:45:44 2023 +0100
@@ -664,8 +664,9 @@
 
     repo.invalidatecaches()
 
-    if b'.hgtags' not in repo.dirstate:
-        repo[None].add([b'.hgtags'])
+    with repo.dirstate.changing_files(repo):
+        if b'.hgtags' not in repo.dirstate:
+            repo[None].add([b'.hgtags'])
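+    # (repo[None] is the working-directory context; add() marks .hgtags as
+    # tracked so the commit below can include it.)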
 
     m = matchmod.exact([b'.hgtags'])
     tagnode = repo.commit(
--- a/mercurial/templater.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/templater.py	Thu Mar 02 22:45:44 2023 +0100
@@ -177,10 +177,17 @@
             quote = program[pos : pos + 2]
             s = pos = pos + 2
             while pos < end:  # find closing escaped quote
+                # pycompat.bytestr and bytes both have a .startswith() that
+                # takes an optional start and an optional end, but pytype
+                # thinks it only takes 2 args.
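+                # (For illustration: program.startswith(b'x', pos, end) is
+                # roughly equivalent to program[pos:end].startswith(b'x').)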
+
+                # pytype: disable=wrong-arg-count
                 if program.startswith(b'\\\\\\', pos, end):
                     pos += 4  # skip over double escaped characters
                     continue
                 if program.startswith(quote, pos, end):
+                    # pytype: enable=wrong-arg-count
+
                     # interpret as if it were a part of an outer string
                     data = parser.unescapestr(program[s:pos])
                     if token == b'template':
@@ -300,7 +307,14 @@
                 return
 
             parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, b'}'))
+
+            # pycompat.bytestr and bytes both have a .startswith() that
+            # takes an optional start and an optional end, but pytype
+            # thinks it only takes 2 args.
+
+            # pytype: disable=wrong-arg-count
             if not tmpl.startswith(b'}', pos):
+                # pytype: enable=wrong-arg-count
                 raise error.ParseError(_(b"invalid token"), pos)
             yield (b'template', parseres, n)
             pos += 1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/LICENSE	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Hynek Schlawack and the attrs contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
--- a/mercurial/thirdparty/attr/LICENSE.txt	Thu Mar 02 15:21:36 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Hynek Schlawack
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
--- a/mercurial/thirdparty/attr/__init__.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/__init__.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,37 +1,35 @@
-from __future__ import absolute_import, division, print_function
+# SPDX-License-Identifier: MIT
+
+
+import sys
+
+from functools import partial
 
-from ._funcs import (
-    asdict,
-    assoc,
-    astuple,
-    evolve,
-    has,
-)
+from . import converters, exceptions, filters, setters, validators
+from ._cmp import cmp_using
+from ._config import get_run_validators, set_run_validators
+from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
 from ._make import (
+    NOTHING,
     Attribute,
     Factory,
-    NOTHING,
-    attr,
-    attributes,
+    attrib,
+    attrs,
     fields,
+    fields_dict,
     make_class,
     validate,
 )
-from ._config import (
-    get_run_validators,
-    set_run_validators,
-)
-from . import exceptions
-from . import filters
-from . import converters
-from . import validators
+from ._version_info import VersionInfo
 
 
-__version__ = "17.2.0"
+__version__ = "22.1.0"
+__version_info__ = VersionInfo._from_version_string(__version__)
 
 __title__ = "attrs"
 __description__ = "Classes Without Boilerplate"
-__uri__ = "http://www.attrs.org/"
+__url__ = "https://www.attrs.org/"
+__uri__ = __url__
 __doc__ = __description__ + " <" + __uri__ + ">"
 
 __author__ = "Hynek Schlawack"
@@ -41,8 +39,9 @@
 __copyright__ = "Copyright (c) 2015 Hynek Schlawack"
 
 
-s = attrs = attributes
-ib = attrib = attr
+s = attributes = attrs
+ib = attr = attrib
+dataclass = partial(attrs, auto_attribs=True)  # happy Easter ;)
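+# (Since `attrs` is `attr.s`, `attr.dataclass` behaves like
+# `attr.s(auto_attribs=True)`: fields come from type annotations rather
+# than attr.ib() assignments.)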
 
 __all__ = [
     "Attribute",
@@ -55,17 +54,26 @@
     "attrib",
     "attributes",
     "attrs",
+    "cmp_using",
     "converters",
     "evolve",
     "exceptions",
     "fields",
+    "fields_dict",
     "filters",
     "get_run_validators",
     "has",
     "ib",
     "make_class",
+    "resolve_types",
     "s",
     "set_run_validators",
+    "setters",
     "validate",
     "validators",
 ]
+
+if sys.version_info[:2] >= (3, 6):
+    from ._next_gen import define, field, frozen, mutable  # noqa: F401
+
+    __all__.extend(("define", "field", "frozen", "mutable"))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/__init__.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,486 @@
+import sys
+
+from typing import (
+    Any,
+    Callable,
+    ClassVar,
+    Dict,
+    Generic,
+    List,
+    Mapping,
+    Optional,
+    Protocol,
+    Sequence,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    overload,
+)
+
+# `import X as X` is required to make these public
+from . import converters as converters
+from . import exceptions as exceptions
+from . import filters as filters
+from . import setters as setters
+from . import validators as validators
+from ._cmp import cmp_using as cmp_using
+from ._version_info import VersionInfo
+
+__version__: str
+__version_info__: VersionInfo
+__title__: str
+__description__: str
+__url__: str
+__uri__: str
+__author__: str
+__email__: str
+__license__: str
+__copyright__: str
+
+_T = TypeVar("_T")
+_C = TypeVar("_C", bound=type)
+
+_EqOrderType = Union[bool, Callable[[Any], Any]]
+_ValidatorType = Callable[[Any, Attribute[_T], _T], Any]
+_ConverterType = Callable[[Any], Any]
+_FilterType = Callable[[Attribute[_T], _T], bool]
+_ReprType = Callable[[Any], str]
+_ReprArgType = Union[bool, _ReprType]
+_OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any]
+_OnSetAttrArgType = Union[
+    _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType
+]
+_FieldTransformer = Callable[
+    [type, List[Attribute[Any]]], List[Attribute[Any]]
+]
+# FIXME: in reality, if multiple validators are passed they must be in a list
+# or tuple, but those are invariant and so would prevent subtypes of
+# _ValidatorType from working when passed in a list or tuple.
+_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]]
+
+# A protocol to be able to statically accept an attrs class.
+class AttrsInstance(Protocol):
+    __attrs_attrs__: ClassVar[Any]
+
+# _make --
+
+NOTHING: object
+
+# NOTE: Factory lies about its return type to make this possible:
+# `x: List[int] # = Factory(list)`
+# Work around mypy issue #4554 in the common case by using an overload.
+if sys.version_info >= (3, 8):
+    from typing import Literal
+    @overload
+    def Factory(factory: Callable[[], _T]) -> _T: ...
+    @overload
+    def Factory(
+        factory: Callable[[Any], _T],
+        takes_self: Literal[True],
+    ) -> _T: ...
+    @overload
+    def Factory(
+        factory: Callable[[], _T],
+        takes_self: Literal[False],
+    ) -> _T: ...
+
+else:
+    @overload
+    def Factory(factory: Callable[[], _T]) -> _T: ...
+    @overload
+    def Factory(
+        factory: Union[Callable[[Any], _T], Callable[[], _T]],
+        takes_self: bool = ...,
+    ) -> _T: ...
+
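+# Illustrative uses of the overloads above (not part of the stubs):
+#     x: List[int] = Factory(list)
+#     y: int = Factory(lambda self: len(self.x), takes_self=True)
+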
+# Static type inference support via __dataclass_transform__ implemented as per:
+# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md
+# This annotation must be applied to all overloads of "define" and "attrs"
+#
+# NOTE: This is a typing construct and does not exist at runtime.  Extensions
+# wrapping attrs decorators should declare a separate __dataclass_transform__
+# signature in the extension module using the specification linked above to
+# provide pyright support.
+def __dataclass_transform__(
+    *,
+    eq_default: bool = True,
+    order_default: bool = False,
+    kw_only_default: bool = False,
+    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
+) -> Callable[[_T], _T]: ...
+
+class Attribute(Generic[_T]):
+    name: str
+    default: Optional[_T]
+    validator: Optional[_ValidatorType[_T]]
+    repr: _ReprArgType
+    cmp: _EqOrderType
+    eq: _EqOrderType
+    order: _EqOrderType
+    hash: Optional[bool]
+    init: bool
+    converter: Optional[_ConverterType]
+    metadata: Dict[Any, Any]
+    type: Optional[Type[_T]]
+    kw_only: bool
+    on_setattr: _OnSetAttrType
+    def evolve(self, **changes: Any) -> "Attribute[Any]": ...
+
+# NOTE: We had several choices for the annotation to use for type arg:
+# 1) Type[_T]
+#   - Pros: Handles simple cases correctly
+#   - Cons: Might produce less informative errors in the case of conflicting
+#     TypeVars e.g. `attr.ib(default='bad', type=int)`
+# 2) Callable[..., _T]
+#   - Pros: Better error messages than #1 for conflicting TypeVars
+#   - Cons: Terrible error messages for validator checks.
+#   e.g. attr.ib(type=int, validator=validate_str)
+#        -> error: Cannot infer function type argument
+# 3) type (and do all of the work in the mypy plugin)
+#   - Pros: Simple here, and we could customize the plugin with our own errors.
+#   - Cons: Would need to write mypy plugin code to handle all the cases.
+# We chose option #1.
+
+# `attr` lies about its return type to make the following possible:
+#     attr()    -> Any
+#     attr(8)   -> int
+#     attr(validator=<some callable>)  -> Whatever the callable expects.
+# This makes this type of assignment possible:
+#     x: int = attr(8)
+#
+# This form catches an explicit None or no default and, with no other
+# arguments, returns Any.
+@overload
+def attrib(
+    default: None = ...,
+    validator: None = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: None = ...,
+    converter: None = ...,
+    factory: None = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def attrib(
+    default: None = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: Optional[Type[_T]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def attrib(
+    default: _T,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: Optional[Type[_T]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def attrib(
+    default: Optional[_T] = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    type: object = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+@overload
+def field(
+    *,
+    default: None = ...,
+    validator: None = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: None = ...,
+    factory: None = ...,
+    kw_only: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def field(
+    *,
+    default: None = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def field(
+    *,
+    default: _T,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def field(
+    *,
+    default: Optional[_T] = ...,
+    validator: Optional[_ValidatorArgType[_T]] = ...,
+    repr: _ReprArgType = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    metadata: Optional[Mapping[Any, Any]] = ...,
+    converter: Optional[_ConverterType] = ...,
+    factory: Optional[Callable[[], _T]] = ...,
+    kw_only: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+) -> Any: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+    maybe_cls: _C,
+    these: Optional[Dict[str, Any]] = ...,
+    repr_ns: Optional[str] = ...,
+    repr: bool = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    auto_detect: bool = ...,
+    collect_by_mro: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+    maybe_cls: None = ...,
+    these: Optional[Dict[str, Any]] = ...,
+    repr_ns: Optional[str] = ...,
+    repr: bool = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    auto_detect: bool = ...,
+    collect_by_mro: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+    maybe_cls: _C,
+    *,
+    these: Optional[Dict[str, Any]] = ...,
+    repr: bool = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    auto_detect: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+    maybe_cls: None = ...,
+    *,
+    these: Optional[Dict[str, Any]] = ...,
+    repr: bool = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[bool] = ...,
+    order: Optional[bool] = ...,
+    auto_detect: bool = ...,
+    getstate_setstate: Optional[bool] = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+    match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+
+mutable = define
+frozen = define  # they differ only in their defaults
+
+def fields(cls: Type[AttrsInstance]) -> Any: ...
+def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ...
+def validate(inst: AttrsInstance) -> None: ...
+def resolve_types(
+    cls: _C,
+    globalns: Optional[Dict[str, Any]] = ...,
+    localns: Optional[Dict[str, Any]] = ...,
+    attribs: Optional[List[Attribute[Any]]] = ...,
+) -> _C: ...
+
+# TODO: add support for returning a proper attrs class from the mypy plugin
+# we use Any instead of _CountingAttr so that e.g. `make_class('Foo',
+# [attr.ib()])` is valid
+def make_class(
+    name: str,
+    attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]],
+    bases: Tuple[type, ...] = ...,
+    repr_ns: Optional[str] = ...,
+    repr: bool = ...,
+    cmp: Optional[_EqOrderType] = ...,
+    hash: Optional[bool] = ...,
+    init: bool = ...,
+    slots: bool = ...,
+    frozen: bool = ...,
+    weakref_slot: bool = ...,
+    str: bool = ...,
+    auto_attribs: bool = ...,
+    kw_only: bool = ...,
+    cache_hash: bool = ...,
+    auto_exc: bool = ...,
+    eq: Optional[_EqOrderType] = ...,
+    order: Optional[_EqOrderType] = ...,
+    collect_by_mro: bool = ...,
+    on_setattr: Optional[_OnSetAttrArgType] = ...,
+    field_transformer: Optional[_FieldTransformer] = ...,
+) -> type: ...
+
+# _funcs --
+
+# TODO: add support for returning TypedDict from the mypy plugin
+# FIXME: asdict/astuple do not honor their factory args. Waiting on one of
+# these:
+# https://github.com/python/mypy/issues/4236
+# https://github.com/python/typing/issues/253
+# XXX: remember to fix attrs.asdict/astuple too!
+def asdict(
+    inst: AttrsInstance,
+    recurse: bool = ...,
+    filter: Optional[_FilterType[Any]] = ...,
+    dict_factory: Type[Mapping[Any, Any]] = ...,
+    retain_collection_types: bool = ...,
+    value_serializer: Optional[
+        Callable[[type, Attribute[Any], Any], Any]
+    ] = ...,
+    tuple_keys: Optional[bool] = ...,
+) -> Dict[str, Any]: ...
+
+# TODO: add support for returning NamedTuple from the mypy plugin
+def astuple(
+    inst: AttrsInstance,
+    recurse: bool = ...,
+    filter: Optional[_FilterType[Any]] = ...,
+    tuple_factory: Type[Sequence[Any]] = ...,
+    retain_collection_types: bool = ...,
+) -> Tuple[Any, ...]: ...
+def has(cls: type) -> bool: ...
+def assoc(inst: _T, **changes: Any) -> _T: ...
+def evolve(inst: _T, **changes: Any) -> _T: ...
+
+# _config --
+
+def set_run_validators(run: bool) -> None: ...
+def get_run_validators() -> bool: ...
+
+# aliases --
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = attrs  # Technically, partial(attrs, auto_attribs=True) ;)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/_cmp.py	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: MIT
+
+
+import functools
+import types
+
+from ._make import _make_ne
+
+
+_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
+
+
+def cmp_using(
+    eq=None,
+    lt=None,
+    le=None,
+    gt=None,
+    ge=None,
+    require_same_type=True,
+    class_name="Comparable",
+):
+    """
+    Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
+    ``cmp`` arguments to customize field comparison.
+
+    The resulting class will have a full set of ordering methods if
+    at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
+
+    :param Optional[callable] eq: `callable` used to evaluate equality
+        of two objects.
+    :param Optional[callable] lt: `callable` used to evaluate whether
+        one object is less than another object.
+    :param Optional[callable] le: `callable` used to evaluate whether
+        one object is less than or equal to another object.
+    :param Optional[callable] gt: `callable` used to evaluate whether
+        one object is greater than another object.
+    :param Optional[callable] ge: `callable` used to evaluate whether
+        one object is greater than or equal to another object.
+
+    :param bool require_same_type: When `True`, equality and ordering methods
+        will return `NotImplemented` if objects are not of the same type.
+
+    :param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
+
+    See `comparison` for more details.
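+
+    For example (an illustrative sketch; assumes NumPy is importable)::
+
+        x = attr.ib(eq=cmp_using(eq=np.array_equal))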
+
+    .. versionadded:: 21.1.0
+    """
+
+    body = {
+        "__slots__": ["value"],
+        "__init__": _make_init(),
+        "_requirements": [],
+        "_is_comparable_to": _is_comparable_to,
+    }
+
+    # Add operations.
+    num_order_functions = 0
+    has_eq_function = False
+
+    if eq is not None:
+        has_eq_function = True
+        body["__eq__"] = _make_operator("eq", eq)
+        body["__ne__"] = _make_ne()
+
+    if lt is not None:
+        num_order_functions += 1
+        body["__lt__"] = _make_operator("lt", lt)
+
+    if le is not None:
+        num_order_functions += 1
+        body["__le__"] = _make_operator("le", le)
+
+    if gt is not None:
+        num_order_functions += 1
+        body["__gt__"] = _make_operator("gt", gt)
+
+    if ge is not None:
+        num_order_functions += 1
+        body["__ge__"] = _make_operator("ge", ge)
+
+    type_ = types.new_class(
+        class_name, (object,), {}, lambda ns: ns.update(body)
+    )
+
+    # Add same type requirement.
+    if require_same_type:
+        type_._requirements.append(_check_same_type)
+
+    # Add total ordering if at least one operation was defined.
+    if 0 < num_order_functions < 4:
+        if not has_eq_function:
+            # functools.total_ordering requires __eq__ to be defined,
+            # so raise an early error here to keep a nice stack trace.
+            raise ValueError(
+                "eq must be defined in order to complete ordering from "
+                "lt, le, gt, ge."
+            )
+        type_ = functools.total_ordering(type_)
+
+    return type_
+
+
+def _make_init():
+    """
+    Create __init__ method.
+    """
+
+    def __init__(self, value):
+        """
+        Initialize object with *value*.
+        """
+        self.value = value
+
+    return __init__
+
+
+def _make_operator(name, func):
+    """
+    Create operator method.
+    """
+
+    def method(self, other):
+        if not self._is_comparable_to(other):
+            return NotImplemented
+
+        result = func(self.value, other.value)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return result
+
+    method.__name__ = "__%s__" % (name,)
+    method.__doc__ = "Return a %s b.  Computed by attrs." % (
+        _operation_names[name],
+    )
+
+    return method
+
+
+def _is_comparable_to(self, other):
+    """
+    Check whether `other` is comparable to `self`.
+    """
+    for func in self._requirements:
+        if not func(self, other):
+            return False
+    return True
+
+
+def _check_same_type(self, other):
+    """
+    Return True if *self* and *other* are of the same type, False otherwise.
+    """
+    return other.value.__class__ is self.value.__class__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/_cmp.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,13 @@
+from typing import Any, Callable, Optional, Type
+
+_CompareWithType = Callable[[Any, Any], bool]
+
+def cmp_using(
+    eq: Optional[_CompareWithType],
+    lt: Optional[_CompareWithType],
+    le: Optional[_CompareWithType],
+    gt: Optional[_CompareWithType],
+    ge: Optional[_CompareWithType],
+    require_same_type: bool,
+    class_name: str,
+) -> Type: ...
--- a/mercurial/thirdparty/attr/_compat.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/_compat.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,90 +1,185 @@
-from __future__ import absolute_import, division, print_function
+# SPDX-License-Identifier: MIT
+
+
+import inspect
+import platform
+import sys
+import threading
+import types
+import warnings
+
+from collections.abc import Mapping, Sequence  # noqa
+
+
+PYPY = platform.python_implementation() == "PyPy"
+PY36 = sys.version_info[:2] >= (3, 6)
+HAS_F_STRINGS = PY36
+PY310 = sys.version_info[:2] >= (3, 10)
 
-import sys
-import types
+
+if PYPY or PY36:
+    ordered_dict = dict
+else:
+    from collections import OrderedDict
+
+    ordered_dict = OrderedDict
+
+
+def just_warn(*args, **kw):
+    warnings.warn(
+        "Running interpreter doesn't sufficiently support code object "
+        "introspection.  Some features like bare super() or accessing "
+        "__class__ will not work with slotted classes.",
+        RuntimeWarning,
+        stacklevel=2,
+    )
 
 
-PY2 = sys.version_info[0] == 2
+class _AnnotationExtractor:
+    """
+    Extract type annotations from a callable, returning None whenever there
+    is none.
+    """
+
+    __slots__ = ["sig"]
+
+    def __init__(self, callable):
+        try:
+            self.sig = inspect.signature(callable)
+        except (ValueError, TypeError):  # inspect failed
+            self.sig = None
+
+    def get_first_param_type(self):
+        """
+        Return the type annotation of the first argument if it's not empty.
+        """
+        if not self.sig:
+            return None
+
+        params = list(self.sig.parameters.values())
+        if params and params[0].annotation is not inspect.Parameter.empty:
+            return params[0].annotation
+
+        return None
+
+    def get_return_type(self):
+        """
+        Return the return type if it's not empty.
+        """
+        if (
+            self.sig
+            and self.sig.return_annotation is not inspect.Signature.empty
+        ):
+            return self.sig.return_annotation
+
+        return None
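+
+
+# Illustrative behavior (a sketch, not part of the vendored code): given
+#     def f(x: int) -> str: ...
+# _AnnotationExtractor(f).get_first_param_type() is int and
+# _AnnotationExtractor(f).get_return_type() is str; both methods return
+# None when the annotation is absent.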
 
 
-if PY2:
-    from UserDict import IterableUserDict
-
-    # We 'bundle' isclass instead of using inspect as importing inspect is
-    # fairly expensive (order of 10-15 ms for a modern machine in 2016)
-    def isclass(klass):
-        return isinstance(klass, (type, types.ClassType))
+def make_set_closure_cell():
+    """Return a function of two arguments (cell, value) which sets
+    the value stored in the closure cell `cell` to `value`.
+    """
+    # pypy makes this easy. (It also supports the logic below, but
+    # why not do the easy/fast thing?)
+    if PYPY:
 
-    # TYPE is used in exceptions, repr(int) is different on Python 2 and 3.
-    TYPE = "type"
+        def set_closure_cell(cell, value):
+            cell.__setstate__((value,))
+
+        return set_closure_cell
 
-    def iteritems(d):
-        return d.iteritems()
+    # Otherwise gotta do it the hard way.
 
-    def iterkeys(d):
-        return d.iterkeys()
+    # Create a function that will set its first cellvar to `value`.
+    def set_first_cellvar_to(value):
+        x = value
+        return
 
-    # Python 2 is bereft of a read-only dict proxy, so we make one!
-    class ReadOnlyDict(IterableUserDict):
-        """
-        Best-effort read-only dict wrapper.
-        """
+        # This function will be eliminated as dead code, but
+        # not before its reference to `x` forces `x` to be
+        # represented as a closure cell rather than a local.
+        def force_x_to_be_a_cell():  # pragma: no cover
+            return x
 
-        def __setitem__(self, key, val):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise TypeError("'mappingproxy' object does not support item "
-                            "assignment")
+    try:
+        # Extract the code object and make sure our assumptions about
+        # the closure behavior are correct.
+        co = set_first_cellvar_to.__code__
+        if co.co_cellvars != ("x",) or co.co_freevars != ():
+            raise AssertionError  # pragma: no cover
 
-        def update(self, _):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'update'")
+        # Convert this code object to a code object that sets the
+        # function's first _freevar_ (not cellvar) to the argument.
+        if sys.version_info >= (3, 8):
 
-        def __delitem__(self, _):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise TypeError("'mappingproxy' object does not support item "
-                            "deletion")
+            def set_closure_cell(cell, value):
+                cell.cell_contents = value
 
-        def clear(self):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'clear'")
-
-        def pop(self, key, default=None):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'pop'")
+        else:
+            args = [co.co_argcount]
+            args.append(co.co_kwonlyargcount)
+            args.extend(
+                [
+                    co.co_nlocals,
+                    co.co_stacksize,
+                    co.co_flags,
+                    co.co_code,
+                    co.co_consts,
+                    co.co_names,
+                    co.co_varnames,
+                    co.co_filename,
+                    co.co_name,
+                    co.co_firstlineno,
+                    co.co_lnotab,
+                    # These two arguments are reversed:
+                    co.co_cellvars,
+                    co.co_freevars,
+                ]
+            )
+            set_first_freevar_code = types.CodeType(*args)
 
-        def popitem(self):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'popitem'")
-
-        def setdefault(self, key, default=None):
-            # We gently pretend we're a Python 3 mappingproxy.
-            raise AttributeError("'mappingproxy' object has no attribute "
-                                 "'setdefault'")
+            def set_closure_cell(cell, value):
+                # Create a function using the set_first_freevar_code,
+                # whose first closure cell is `cell`. Calling it will
+                # change the value of that cell.
+                setter = types.FunctionType(
+                    set_first_freevar_code, {}, "setter", (), (cell,)
+                )
+                # And call it to set the cell.
+                setter(value)
 
-        def __repr__(self):
-            # Override to be identical to the Python 3 version.
-            return "mappingproxy(" + repr(self.data) + ")"
+        # Make sure it works on this interpreter:
+        def make_func_with_cell():
+            x = None
+
+            def func():
+                return x  # pragma: no cover
 
-    def metadata_proxy(d):
-        res = ReadOnlyDict()
-        res.data.update(d)  # We blocked update, so we have to do it like this.
-        return res
+            return func
+
+        cell = make_func_with_cell().__closure__[0]
+        set_closure_cell(cell, 100)
+        if cell.cell_contents != 100:
+            raise AssertionError  # pragma: no cover
 
-else:
-    def isclass(klass):
-        return isinstance(klass, type)
+    except Exception:
+        return just_warn
+    else:
+        return set_closure_cell
 
-    TYPE = "class"
+
+set_closure_cell = make_set_closure_cell()
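+
+# Illustrative use (a sketch, not part of the vendored code):
+#     cell = (lambda x: (lambda: x))(1).__closure__[0]
+#     set_closure_cell(cell, 2)  # cell.cell_contents is now 2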
 
-    def iteritems(d):
-        return d.items()
-
-    def iterkeys(d):
-        return d.keys()
-
-    def metadata_proxy(d):
-        return types.MappingProxyType(dict(d))
+# Thread-local global to track attrs instances which are already being repr'd.
+# This is needed because there is no other (thread-safe) way to pass info
+# about the instances that are already being repr'd through the call stack
+# in order to ensure we don't perform infinite recursion.
+#
+# For instance, if an instance contains a dict which contains that instance,
+# we need to know that we're already repr'ing the outside instance from within
+# the dict's repr() call.
+#
+# This lives here rather than in _make.py so that the functions in _make.py
+# don't have a direct reference to the thread-local in their globals dict.
+# If they have such a reference, it breaks cloudpickle.
+repr_context = threading.local()
--- a/mercurial/thirdparty/attr/_config.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/_config.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, division, print_function
+# SPDX-License-Identifier: MIT
 
 
 __all__ = ["set_run_validators", "get_run_validators"]
@@ -9,6 +9,10 @@
 def set_run_validators(run):
     """
     Set whether or not validators are run.  By default, they are run.
+
+    .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+        moved to the new ``attrs`` namespace. Use
+        `attrs.validators.set_disabled()` instead.
     """
     if not isinstance(run, bool):
         raise TypeError("'run' must be bool.")
@@ -19,5 +23,9 @@
 def get_run_validators():
     """
     Return whether or not validators are run.
+
+    .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+        moved to the new ``attrs`` namespace. Use
+        `attrs.validators.get_disabled()` instead.
     """
     return _run_validators
--- a/mercurial/thirdparty/attr/_funcs.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/_funcs.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,14 +1,20 @@
-from __future__ import absolute_import, division, print_function
+# SPDX-License-Identifier: MIT
+
 
 import copy
 
-from ._compat import iteritems
-from ._make import NOTHING, fields, _obj_setattr
+from ._make import NOTHING, _obj_setattr, fields
 from .exceptions import AttrsAttributeNotFoundError
 
 
-def asdict(inst, recurse=True, filter=None, dict_factory=dict,
-           retain_collection_types=False):
+def asdict(
+    inst,
+    recurse=True,
+    filter=None,
+    dict_factory=dict,
+    retain_collection_types=False,
+    value_serializer=None,
+):
     """
     Return the ``attrs`` attribute values of *inst* as a dict.
 
@@ -17,9 +23,9 @@
     :param inst: Instance of an ``attrs``-decorated class.
     :param bool recurse: Recurse into classes that are also
         ``attrs``-decorated.
-    :param callable filter: A callable whose return code deteremines whether an
+    :param callable filter: A callable whose return code determines whether an
         attribute or element is included (``True``) or dropped (``False``).  Is
-        called with the :class:`attr.Attribute` as the first argument and the
+        called with the `attrs.Attribute` as the first argument and the
         value as the second argument.
     :param callable dict_factory: A callable to produce dictionaries from.  For
         example, to produce ordered dictionaries instead of normal Python
@@ -27,6 +33,10 @@
     :param bool retain_collection_types: Do not convert to ``list`` when
         encountering an attribute whose type is ``tuple`` or ``set``.  Only
         meaningful if ``recurse`` is ``True``.
+    :param Optional[callable] value_serializer: A hook that is called for every
+        attribute or dict key/value.  It receives the current instance, field
+        and value and must return the (updated) value.  The hook is run *after*
+        the optional *filter* has been applied.
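+        For example (an illustrative hook), ``lambda inst, field, value:
+        str(value)`` would stringify every serialized value.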
 
     :rtype: return type of *dict_factory*
 
@@ -35,6 +45,9 @@
 
     ..  versionadded:: 16.0.0 *dict_factory*
     ..  versionadded:: 16.1.0 *retain_collection_types*
+    ..  versionadded:: 20.3.0 *value_serializer*
+    ..  versionadded:: 21.3.0 If a dict has a collection for a key, it is
+        serialized as a tuple.
     """
     attrs = fields(inst.__class__)
     rv = dict_factory()
@@ -42,24 +55,58 @@
         v = getattr(inst, a.name)
         if filter is not None and not filter(a, v):
             continue
+
+        if value_serializer is not None:
+            v = value_serializer(inst, a, v)
+
         if recurse is True:
             if has(v.__class__):
-                rv[a.name] = asdict(v, recurse=True, filter=filter,
-                                    dict_factory=dict_factory)
-            elif isinstance(v, (tuple, list, set)):
+                rv[a.name] = asdict(
+                    v,
+                    recurse=True,
+                    filter=filter,
+                    dict_factory=dict_factory,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                )
+            elif isinstance(v, (tuple, list, set, frozenset)):
                 cf = v.__class__ if retain_collection_types is True else list
-                rv[a.name] = cf([
-                    asdict(i, recurse=True, filter=filter,
-                           dict_factory=dict_factory)
-                    if has(i.__class__) else i
-                    for i in v
-                ])
+                rv[a.name] = cf(
+                    [
+                        _asdict_anything(
+                            i,
+                            is_key=False,
+                            filter=filter,
+                            dict_factory=dict_factory,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        )
+                        for i in v
+                    ]
+                )
             elif isinstance(v, dict):
                 df = dict_factory
-                rv[a.name] = df((
-                    asdict(kk, dict_factory=df) if has(kk.__class__) else kk,
-                    asdict(vv, dict_factory=df) if has(vv.__class__) else vv)
-                    for kk, vv in iteritems(v))
+                rv[a.name] = df(
+                    (
+                        _asdict_anything(
+                            kk,
+                            is_key=True,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                        _asdict_anything(
+                            vv,
+                            is_key=False,
+                            filter=filter,
+                            dict_factory=df,
+                            retain_collection_types=retain_collection_types,
+                            value_serializer=value_serializer,
+                        ),
+                    )
+                    for kk, vv in v.items()
+                )
             else:
                 rv[a.name] = v
         else:
@@ -67,8 +114,86 @@
     return rv
 
 
-def astuple(inst, recurse=True, filter=None, tuple_factory=tuple,
-            retain_collection_types=False):
+def _asdict_anything(
+    val,
+    is_key,
+    filter,
+    dict_factory,
+    retain_collection_types,
+    value_serializer,
+):
+    """
+    ``asdict`` only works on attrs instances; this works on anything.
+    """
+    if getattr(val.__class__, "__attrs_attrs__", None) is not None:
+        # Attrs class.
+        rv = asdict(
+            val,
+            recurse=True,
+            filter=filter,
+            dict_factory=dict_factory,
+            retain_collection_types=retain_collection_types,
+            value_serializer=value_serializer,
+        )
+    elif isinstance(val, (tuple, list, set, frozenset)):
+        if retain_collection_types is True:
+            cf = val.__class__
+        elif is_key:
+            cf = tuple
+        else:
+            cf = list
+
+        rv = cf(
+            [
+                _asdict_anything(
+                    i,
+                    is_key=False,
+                    filter=filter,
+                    dict_factory=dict_factory,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                )
+                for i in val
+            ]
+        )
+    elif isinstance(val, dict):
+        df = dict_factory
+        rv = df(
+            (
+                _asdict_anything(
+                    kk,
+                    is_key=True,
+                    filter=filter,
+                    dict_factory=df,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                ),
+                _asdict_anything(
+                    vv,
+                    is_key=False,
+                    filter=filter,
+                    dict_factory=df,
+                    retain_collection_types=retain_collection_types,
+                    value_serializer=value_serializer,
+                ),
+            )
+            for kk, vv in val.items()
+        )
+    else:
+        rv = val
+        if value_serializer is not None:
+            rv = value_serializer(None, None, rv)
+
+    return rv
+
+
+def astuple(
+    inst,
+    recurse=True,
+    filter=None,
+    tuple_factory=tuple,
+    retain_collection_types=False,
+):
     """
     Return the ``attrs`` attribute values of *inst* as a tuple.
 
@@ -79,7 +204,7 @@
         ``attrs``-decorated.
     :param callable filter: A callable whose return code determines whether an
         attribute or element is included (``True``) or dropped (``False``).  Is
-        called with the :class:`attr.Attribute` as the first argument and the
+        called with the `attrs.Attribute` as the first argument and the
         value as the second argument.
     :param callable tuple_factory: A callable to produce tuples from.  For
         example, to produce lists instead of tuples.
@@ -104,38 +229,61 @@
             continue
         if recurse is True:
             if has(v.__class__):
-                rv.append(astuple(v, recurse=True, filter=filter,
-                                  tuple_factory=tuple_factory,
-                                  retain_collection_types=retain))
-            elif isinstance(v, (tuple, list, set)):
+                rv.append(
+                    astuple(
+                        v,
+                        recurse=True,
+                        filter=filter,
+                        tuple_factory=tuple_factory,
+                        retain_collection_types=retain,
+                    )
+                )
+            elif isinstance(v, (tuple, list, set, frozenset)):
                 cf = v.__class__ if retain is True else list
-                rv.append(cf([
-                    astuple(j, recurse=True, filter=filter,
-                            tuple_factory=tuple_factory,
-                            retain_collection_types=retain)
-                    if has(j.__class__) else j
-                    for j in v
-                ]))
+                rv.append(
+                    cf(
+                        [
+                            astuple(
+                                j,
+                                recurse=True,
+                                filter=filter,
+                                tuple_factory=tuple_factory,
+                                retain_collection_types=retain,
+                            )
+                            if has(j.__class__)
+                            else j
+                            for j in v
+                        ]
+                    )
+                )
             elif isinstance(v, dict):
                 df = v.__class__ if retain is True else dict
-                rv.append(df(
+                rv.append(
+                    df(
                         (
                             astuple(
                                 kk,
                                 tuple_factory=tuple_factory,
-                                retain_collection_types=retain
-                            ) if has(kk.__class__) else kk,
+                                retain_collection_types=retain,
+                            )
+                            if has(kk.__class__)
+                            else kk,
                             astuple(
                                 vv,
                                 tuple_factory=tuple_factory,
-                                retain_collection_types=retain
-                            ) if has(vv.__class__) else vv
+                                retain_collection_types=retain,
+                            )
+                            if has(vv.__class__)
+                            else vv,
                         )
-                        for kk, vv in iteritems(v)))
+                        for kk, vv in v.items()
+                    )
+                )
             else:
                 rv.append(v)
         else:
             rv.append(v)
+
     return rv if tuple_factory is list else tuple_factory(rv)
 
 
@@ -146,7 +294,7 @@
     :param type cls: Class to introspect.
     :raise TypeError: If *cls* is not a class.
 
-    :rtype: :class:`bool`
+    :rtype: bool
     """
     return getattr(cls, "__attrs_attrs__", None) is not None
 
@@ -166,19 +314,26 @@
         class.
 
     ..  deprecated:: 17.1.0
-        Use :func:`evolve` instead.
+        Use `attrs.evolve` instead if you can.
+        This function will not be removed due to the slightly different
+        approach compared to `attrs.evolve`.
     """
     import warnings
-    warnings.warn("assoc is deprecated and will be removed after 2018/01.",
-                  DeprecationWarning)
+
+    warnings.warn(
+        "assoc is deprecated and will be removed after 2018/01.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
     new = copy.copy(inst)
     attrs = fields(inst.__class__)
-    for k, v in iteritems(changes):
+    for k, v in changes.items():
         a = getattr(attrs, k, NOTHING)
         if a is NOTHING:
             raise AttrsAttributeNotFoundError(
-                "{k} is not an attrs attribute on {cl}."
-                .format(k=k, cl=new.__class__)
+                "{k} is not an attrs attribute on {cl}.".format(
+                    k=k, cl=new.__class__
+                )
             )
         _obj_setattr(new, k, v)
     return new
@@ -209,4 +364,57 @@
         init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
         if init_name not in changes:
             changes[init_name] = getattr(inst, attr_name)
+
     return cls(**changes)
+
+
+def resolve_types(cls, globalns=None, localns=None, attribs=None):
+    """
+    Resolve any strings and forward annotations in type annotations.
+
+    This is only required if you need concrete types in `Attribute`'s *type*
+    field. In other words, you don't need to resolve your types if you only
+    use them for static type checking.
+
+    With no arguments, names will be looked up in the module in which the class
+    was created. If this is not what you want, e.g. if the name only exists
+    inside a method, you may pass *globalns* or *localns* to specify other
+    dictionaries in which to look up these names. See the docs of
+    `typing.get_type_hints` for more details.
+
+    :param type cls: Class to resolve.
+    :param Optional[dict] globalns: Dictionary containing global variables.
+    :param Optional[dict] localns: Dictionary containing local variables.
+    :param Optional[list] attribs: List of attribs for the given class.
+        This is necessary when calling from inside a ``field_transformer``
+        since *cls* is not an ``attrs`` class yet.
+
+    :raise TypeError: If *cls* is not a class.
+    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+        class and you didn't pass any attribs.
+    :raise NameError: If types cannot be resolved because of missing variables.
+
+    :returns: *cls* so you can use this function also as a class decorator.
+        Please note that you have to apply it **after** `attrs.define`. That
+        means the decorator has to come in the line **before** `attrs.define`.
+
+    ..  versionadded:: 20.1.0
+    ..  versionadded:: 21.1.0 *attribs*
+
+    """
+    # Since calling get_type_hints is expensive we cache whether we've
+    # done it already.
+    if getattr(cls, "__attrs_types_resolved__", None) != cls:
+        import typing
+
+        hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
+        for field in fields(cls) if attribs is None else attribs:
+            if field.name in hints:
+                # Since fields have been frozen we must work around it.
+                _obj_setattr(field, "type", hints[field.name])
+        # We store the class we resolved so that subclasses know they haven't
+        # been resolved.
+        cls.__attrs_types_resolved__ = cls
+
+    # Return the class so you can use it as a decorator too.
+    return cls
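+
+
+# Illustrative use of resolve_types (a sketch, not part of the vendored code):
+#     @attr.resolve_types
+#     @attr.s(auto_attribs=True)
+#     class Node:
+#         next: typing.Optional["Node"] = None
+#     # afterwards, attr.fields(Node).next.type is typing.Optional[Node]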
--- a/mercurial/thirdparty/attr/_make.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/_make.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,50 +1,79 @@
-from __future__ import absolute_import, division, print_function
-
-import hashlib
+# SPDX-License-Identifier: MIT
+
+import copy
 import linecache
+import sys
+import types
+import typing
 
 from operator import itemgetter
 
-from . import _config
-from ._compat import PY2, iteritems, isclass, iterkeys, metadata_proxy
+# We need to import _compat itself in addition to the _compat members to avoid
+# having the thread-local in the globals here.
+from . import _compat, _config, setters
+from ._compat import (
+    HAS_F_STRINGS,
+    PY310,
+    PYPY,
+    _AnnotationExtractor,
+    ordered_dict,
+    set_closure_cell,
+)
 from .exceptions import (
     DefaultAlreadySetError,
     FrozenInstanceError,
     NotAnAttrsClassError,
+    UnannotatedAttributeError,
 )
 
 
 # This is used at least twice, so cache it here.
 _obj_setattr = object.__setattr__
-_init_convert_pat = "__attr_convert_{}"
+_init_converter_pat = "__attr_converter_%s"
 _init_factory_pat = "__attr_factory_{}"
-_tuple_property_pat = "    {attr_name} = property(itemgetter({index}))"
-_empty_metadata_singleton = metadata_proxy({})
-
-
-class _Nothing(object):
+_tuple_property_pat = (
+    "    {attr_name} = _attrs_property(_attrs_itemgetter({index}))"
+)
+_classvar_prefixes = (
+    "typing.ClassVar",
+    "t.ClassVar",
+    "ClassVar",
+    "typing_extensions.ClassVar",
+)
+# we don't use a double-underscore prefix because that triggers
+# name mangling when trying to create a slot for the field
+# (when slots=True)
+_hash_cache_field = "_attrs_cached_hash"
+
+_empty_metadata_singleton = types.MappingProxyType({})
+
+# Unique object for unequivocal getattr() defaults.
+_sentinel = object()
+
+_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate)
+
+
+class _Nothing:
     """
     Sentinel class to indicate the lack of a value when ``None`` is ambiguous.
 
-    All instances of `_Nothing` are equal.
+    ``_Nothing`` is a singleton. There is only ever one of it.
+
+    .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
     """
-    def __copy__(self):
-        return self
-
-    def __deepcopy__(self, _):
-        return self
-
-    def __eq__(self, other):
-        return other.__class__ == _Nothing
-
-    def __ne__(self, other):
-        return not self == other
+
+    _singleton = None
+
+    def __new__(cls):
+        if _Nothing._singleton is None:
+            _Nothing._singleton = super().__new__(cls)
+        return _Nothing._singleton
 
     def __repr__(self):
         return "NOTHING"
 
-    def __hash__(self):
-        return 0xdeadbeef
+    def __bool__(self):
+        return False
 
 
 NOTHING = _Nothing()
@@ -53,92 +82,255 @@
 """
 
 
-def attr(default=NOTHING, validator=None,
-         repr=True, cmp=True, hash=None, init=True,
-         convert=None, metadata={}):
-    r"""
+class _CacheHashWrapper(int):
+    """
+    An integer subclass that pickles / copies as None
+
+    This is used for non-slots classes with ``cache_hash=True``, to avoid
+    serializing a potentially (even likely) invalid hash value. Since ``None``
+    is the default value for uncalculated hashes, whenever this is copied,
+    the copy's value for the hash should automatically reset.
+
+    See GH #613 for more details.
+    """
+
+    def __reduce__(self, _none_constructor=type(None), _args=()):
+        return _none_constructor, _args
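+
+# Sketch of the reset-on-copy behavior (stdlib only; the value is
+# illustrative):
+#
+#     import copy, pickle
+#
+#     cached = _CacheHashWrapper(12345)
+#     assert copy.copy(cached) is None                  # __reduce__ -> None
+#     assert pickle.loads(pickle.dumps(cached)) is None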
+
+
+def attrib(
+    default=NOTHING,
+    validator=None,
+    repr=True,
+    cmp=None,
+    hash=None,
+    init=True,
+    metadata=None,
+    type=None,
+    converter=None,
+    factory=None,
+    kw_only=False,
+    eq=None,
+    order=None,
+    on_setattr=None,
+):
+    """
     Create a new attribute on a class.
 
     ..  warning::
 
         Does *not* do anything unless the class is also decorated with
-        :func:`attr.s`!
+        `attr.s`!
 
     :param default: A value that is used if an ``attrs``-generated ``__init__``
         is used and no value is passed while instantiating or the attribute is
         excluded using ``init=False``.
 
-        If the value is an instance of :class:`Factory`, its callable will be
-        used to construct a new value (useful for mutable datatypes like lists
+        If the value is an instance of `attrs.Factory`, its callable will be
+        used to construct a new value (useful for mutable data types like lists
         or dicts).
 
-        If a default is not set (or set manually to ``attr.NOTHING``), a value
-        *must* be supplied when instantiating; otherwise a :exc:`TypeError`
+        If a default is not set (or set manually to `attrs.NOTHING`), a value
+        *must* be supplied when instantiating; otherwise a `TypeError`
         will be raised.
 
         The default can also be set using decorator notation as shown below.
 
-    :type default: Any value.
-
-    :param validator: :func:`callable` that is called by ``attrs``-generated
+    :type default: Any value
+
+    :param callable factory: Syntactic sugar for
+        ``default=attr.Factory(factory)``.
+
+    :param validator: `callable` that is called by ``attrs``-generated
         ``__init__`` methods after the instance has been initialized.  They
-        receive the initialized instance, the :class:`Attribute`, and the
+        receive the initialized instance, the :func:`~attrs.Attribute`, and the
         passed value.
 
         The return value is *not* inspected so the validator has to throw an
         exception itself.
 
-        If a ``list`` is passed, its items are treated as validators and must
+        If a `list` is passed, its items are treated as validators and must
         all pass.
 
         Validators can be globally disabled and re-enabled using
-        :func:`get_run_validators`.
+        `get_run_validators`.
 
         The validator can also be set using decorator notation as shown below.
 
-    :type validator: ``callable`` or a ``list`` of ``callable``\ s.
-
-    :param bool repr: Include this attribute in the generated ``__repr__``
-        method.
-    :param bool cmp: Include this attribute in the generated comparison methods
-        (``__eq__`` et al).
-    :param hash: Include this attribute in the generated ``__hash__``
-        method.  If ``None`` (default), mirror *cmp*'s value.  This is the
-        correct behavior according the Python spec.  Setting this value to
-        anything else than ``None`` is *discouraged*.
-    :type hash: ``bool`` or ``None``
+    :type validator: `callable` or a `list` of `callable`\\ s.
+
+    :param repr: Include this attribute in the generated ``__repr__``
+        method. If ``True``, include the attribute; if ``False``, omit it. By
+        default, the built-in ``repr()`` function is used. To override how the
+        attribute value is formatted, pass a ``callable`` that takes a single
+        value and returns a string. Note that the resulting string is used
+        as-is, i.e. it will be used directly *instead* of calling ``repr()``
+        (the default).
+    :type repr: a `bool` or a `callable` to use a custom function.
+
+    :param eq: If ``True`` (default), include this attribute in the
+        generated ``__eq__`` and ``__ne__`` methods that check two instances
+        for equality. To override how the attribute value is compared,
+        pass a ``callable`` that takes a single value and returns the value
+        to be compared.
+    :type eq: a `bool` or a `callable`.
+
+    :param order: If ``True`` (default), include this attribute in the
+        generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
+        To override how the attribute value is ordered,
+        pass a ``callable`` that takes a single value and returns the value
+        to be ordered.
+    :type order: a `bool` or a `callable`.
+
+    :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
+        same value. Must not be mixed with *eq* or *order*.
+    :type cmp: a `bool` or a `callable`.
+
+    :param Optional[bool] hash: Include this attribute in the generated
+        ``__hash__`` method.  If ``None`` (default), mirror *eq*'s value.  This
+        is the correct behavior according to the Python spec.  Setting this
+        value to anything other than ``None`` is *discouraged*.
     :param bool init: Include this attribute in the generated ``__init__``
         method.  It is possible to set this to ``False`` and set a default
         value.  In that case this attribute is unconditionally initialized
         with the specified default value or factory.
-    :param callable convert: :func:`callable` that is called by
+    :param callable converter: `callable` that is called by
         ``attrs``-generated ``__init__`` methods to convert attribute's value
         to the desired format.  It is given the passed-in value, and the
         returned value will be used as the new value of the attribute.  The
         value is converted before being passed to the validator, if any.
     :param metadata: An arbitrary mapping, to be used by third-party
-        components.  See :ref:`extending_metadata`.
-
-    ..  versionchanged:: 17.1.0 *validator* can be a ``list`` now.
-    ..  versionchanged:: 17.1.0
-        *hash* is ``None`` and therefore mirrors *cmp* by default .
+        components.  See `extending_metadata`.
+    :param type: The type of the attribute.  In Python 3.6 or greater, the
+        preferred method to specify the type is using a variable annotation
+        (see :pep:`526`).
+        This argument is provided for backward compatibility.
+        Regardless of the approach used, the type will be stored on
+        ``Attribute.type``.
+
+        Please note that ``attrs`` doesn't do anything with this metadata by
+        itself. You can use it as part of your own code or for
+        `static type checking <types>`.
+    :param kw_only: Make this attribute keyword-only (Python 3+)
+        in the generated ``__init__`` (if ``init`` is ``False``, this
+        parameter is ignored).
+    :param on_setattr: Allows overwriting the *on_setattr* setting from
+        `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used.
+        Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this
+        attribute -- regardless of the setting in `attr.s`.
+    :type on_setattr: `callable`, or a list of callables, or `None`, or
+        `attrs.setters.NO_OP`
+
+    .. versionadded:: 15.2.0 *convert*
+    .. versionadded:: 16.3.0 *metadata*
+    .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
+    .. versionchanged:: 17.1.0
+       *hash* is ``None`` and therefore mirrors *eq* by default.
+    .. versionadded:: 17.3.0 *type*
+    .. deprecated:: 17.4.0 *convert*
+    .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
+       *convert* to achieve consistency with other noun-based arguments.
+    .. versionadded:: 18.1.0
+       ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionchanged:: 19.2.0 *convert* keyword argument removed.
+    .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
+    .. versionchanged:: 21.1.0
+       *eq*, *order*, and *cmp* also accept a custom callable
+    .. versionchanged:: 21.1.0 *cmp* undeprecated
     """
+    eq, eq_key, order, order_key = _determine_attrib_eq_order(
+        cmp, eq, order, True
+    )
+
     if hash is not None and hash is not True and hash is not False:
         raise TypeError(
             "Invalid value for hash.  Must be True, False, or None."
         )
+
+    if factory is not None:
+        if default is not NOTHING:
+            raise ValueError(
+                "The `default` and `factory` arguments are mutually "
+                "exclusive."
+            )
+        if not callable(factory):
+            raise ValueError("The `factory` argument must be a callable.")
+        default = Factory(factory)
+
+    if metadata is None:
+        metadata = {}
+
+    # Apply syntactic sugar by auto-wrapping.
+    if isinstance(on_setattr, (list, tuple)):
+        on_setattr = setters.pipe(*on_setattr)
+
+    if validator and isinstance(validator, (list, tuple)):
+        validator = and_(*validator)
+
+    if converter and isinstance(converter, (list, tuple)):
+        converter = pipe(*converter)
+
     return _CountingAttr(
         default=default,
         validator=validator,
         repr=repr,
-        cmp=cmp,
+        cmp=None,
         hash=hash,
         init=init,
-        convert=convert,
+        converter=converter,
         metadata=metadata,
+        type=type,
+        kw_only=kw_only,
+        eq=eq,
+        eq_key=eq_key,
+        order=order,
+        order_key=order_key,
+        on_setattr=on_setattr,
     )
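+
+# A minimal usage sketch for the options above (assumes the public ``attr``
+# package; ``Point`` and its fields are illustrative only):
+#
+#     import attr
+#
+#     @attr.s
+#     class Point:
+#         x = attr.ib(converter=int)        # converted before validation
+#         y = attr.ib(default=0, kw_only=True)
+#         tags = attr.ib(factory=list)      # sugar for default=attr.Factory(list)
+#
+#     p = Point("1", tags=["a"])
+#     assert p.x == 1 and p.y == 0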
 
 
+def _compile_and_eval(script, globs, locs=None, filename=""):
+    """
+    "Exec" the script with the given global (globs) and local (locs) variables.
+    """
+    bytecode = compile(script, filename, "exec")
+    eval(bytecode, globs, locs)
+
+
+def _make_method(name, script, filename, globs):
+    """
+    Create the method with the script given and return the method object.
+    """
+    locs = {}
+
+    # In order for debuggers like PDB to be able to step through the code,
+    # we add a fake linecache entry.
+    count = 1
+    base_filename = filename
+    while True:
+        linecache_tuple = (
+            len(script),
+            None,
+            script.splitlines(True),
+            filename,
+        )
+        old_val = linecache.cache.setdefault(filename, linecache_tuple)
+        if old_val == linecache_tuple:
+            break
+        else:
+            filename = "{}-{}>".format(base_filename[:-1], count)
+            count += 1
+
+    _compile_and_eval(script, globs, locs, filename)
+
+    return locs[name]
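+
+# Sketch of the helper above (script and filename are illustrative): the
+# linecache entry lets debuggers show source for the generated function.
+#
+#     inc = _make_method(
+#         "inc", "def inc(self):\n    return self.x + 1", "<generated inc>", {}
+#     )
+#     # "inc" is now a plain function object with steppable source.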
+
+
 def _make_attr_tuple_class(cls_name, attr_names):
     """
     Create a tuple subclass to hold `Attribute`s for an `attrs` class.
@@ -156,75 +348,273 @@
     ]
     if attr_names:
         for i, attr_name in enumerate(attr_names):
-            attr_class_template.append(_tuple_property_pat.format(
-                index=i,
-                attr_name=attr_name,
-            ))
+            attr_class_template.append(
+                _tuple_property_pat.format(index=i, attr_name=attr_name)
+            )
     else:
         attr_class_template.append("    pass")
-    globs = {"itemgetter": itemgetter}
-    eval(compile("\n".join(attr_class_template), "", "exec"), globs)
+    globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
+    _compile_and_eval("\n".join(attr_class_template), globs)
     return globs[attr_class_name]
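+
+# Behavior sketch (names illustrative): the generated tuple subclass pairs
+# positional access with named properties.
+#
+#     Pair = _make_attr_tuple_class("Pair", ["first", "second"])
+#     p = Pair([1, 2])                 # built from an iterable, like tuple
+#     assert p.first == 1 and p[1] == 2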
 
 
-def _transform_attrs(cls, these):
+# Tuple class for extracted attributes from a class definition.
+# `base_attrs` is a subset of `attrs`.
+_Attributes = _make_attr_tuple_class(
+    "_Attributes",
+    [
+        # all attributes to build dunder methods for
+        "attrs",
+        # attributes that have been inherited
+        "base_attrs",
+        # map inherited attributes to their originating classes
+        "base_attrs_map",
+    ],
+)
+
+
+def _is_class_var(annot):
+    """
+    Check whether *annot* is a typing.ClassVar.
+
+    The string comparison hack is used to avoid evaluating all string
+    annotations, which would put attrs-based classes at a performance
+    disadvantage compared to plain old classes.
+    """
+    annot = str(annot)
+
+    # Annotation can be quoted.
+    if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
+        annot = annot[1:-1]
+
+    return annot.startswith(_classvar_prefixes)
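+
+# Quick sketch of the string-based check (inputs illustrative):
+#
+#     assert _is_class_var("typing.ClassVar[int]")
+#     assert _is_class_var("'ClassVar[int]'")    # quoted annotations unwrap
+#     assert not _is_class_var("typing.List[int]")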
+
+
+def _has_own_attribute(cls, attrib_name):
+    """
+    Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
+
+    Requires Python 3.
+    """
+    attr = getattr(cls, attrib_name, _sentinel)
+    if attr is _sentinel:
+        return False
+
+    for base_cls in cls.__mro__[1:]:
+        a = getattr(base_cls, attrib_name, None)
+        if attr is a:
+            return False
+
+    return True
+
+
+def _get_annotations(cls):
+    """
+    Get annotations for *cls*.
+    """
+    if _has_own_attribute(cls, "__annotations__"):
+        return cls.__annotations__
+
+    return {}
+
+
+def _counter_getter(e):
+    """
+    Key function for sorting to avoid re-creating a lambda for every class.
     """
-    Transforms all `_CountingAttr`s on a class into `Attribute`s and saves the
-    list in `__attrs_attrs__`.
+    return e[1].counter
+
+
+def _collect_base_attrs(cls, taken_attr_names):
+    """
+    Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+    """
+    base_attrs = []
+    base_attr_map = {}  # A dictionary of base attrs to their classes.
+
+    # Traverse the MRO and collect attributes.
+    for base_cls in reversed(cls.__mro__[1:-1]):
+        for a in getattr(base_cls, "__attrs_attrs__", []):
+            if a.inherited or a.name in taken_attr_names:
+                continue
+
+            a = a.evolve(inherited=True)
+            base_attrs.append(a)
+            base_attr_map[a.name] = base_cls
+
+    # For each name, only keep the freshest definition, i.e. the one furthest
+    # toward the back.  base_attr_map is fine because it gets overwritten with
+    # every new instance.
+    filtered = []
+    seen = set()
+    for a in reversed(base_attrs):
+        if a.name in seen:
+            continue
+        filtered.insert(0, a)
+        seen.add(a.name)
+
+    return filtered, base_attr_map
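+
+# Inheritance sketch (classes illustrative; assumes the public ``attr`` API):
+# own attributes shadow inherited ones, and base attributes come first.
+#
+#     @attr.s
+#     class A:
+#         x = attr.ib()
+#
+#     @attr.s(collect_by_mro=True)
+#     class B(A):
+#         y = attr.ib()
+#
+#     assert [a.name for a in attr.fields(B)] == ["x", "y"]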
+
+
+def _collect_base_attrs_broken(cls, taken_attr_names):
+    """
+    Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+
+    N.B. *taken_attr_names* will be mutated.
+
+    Adhere to the old incorrect behavior.
+
+    Notably, it collects from the front and considers inherited attributes,
+    which leads to the buggy behavior reported in #428.
+    """
+    base_attrs = []
+    base_attr_map = {}  # A dictionary of base attrs to their classes.
+
+    # Traverse the MRO and collect attributes.
+    for base_cls in cls.__mro__[1:-1]:
+        for a in getattr(base_cls, "__attrs_attrs__", []):
+            if a.name in taken_attr_names:
+                continue
+
+            a = a.evolve(inherited=True)
+            taken_attr_names.add(a.name)
+            base_attrs.append(a)
+            base_attr_map[a.name] = base_cls
+
+    return base_attrs, base_attr_map
+
+
+def _transform_attrs(
+    cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
+):
+    """
+    Transform all `_CountingAttr`s on a class into `Attribute`s.
 
     If *these* is passed, use that and don't look for them on the class.
+
+    If *collect_by_mro* is True, collect them in the correct MRO order;
+    otherwise use the old -- incorrect -- order.  See #428.
+
+    Return an `_Attributes`.
     """
-    super_cls = []
-    for c in reversed(cls.__mro__[1:-1]):
-        sub_attrs = getattr(c, "__attrs_attrs__", None)
-        if sub_attrs is not None:
-            super_cls.extend(a for a in sub_attrs if a not in super_cls)
-    if these is None:
-        ca_list = [(name, attr)
-                   for name, attr
-                   in cls.__dict__.items()
-                   if isinstance(attr, _CountingAttr)]
+    cd = cls.__dict__
+    anns = _get_annotations(cls)
+
+    if these is not None:
+        ca_list = [(name, ca) for name, ca in these.items()]
+
+        if not isinstance(these, ordered_dict):
+            ca_list.sort(key=_counter_getter)
+    elif auto_attribs is True:
+        ca_names = {
+            name
+            for name, attr in cd.items()
+            if isinstance(attr, _CountingAttr)
+        }
+        ca_list = []
+        annot_names = set()
+        for attr_name, type in anns.items():
+            if _is_class_var(type):
+                continue
+            annot_names.add(attr_name)
+            a = cd.get(attr_name, NOTHING)
+
+            if not isinstance(a, _CountingAttr):
+                if a is NOTHING:
+                    a = attrib()
+                else:
+                    a = attrib(default=a)
+            ca_list.append((attr_name, a))
+
+        unannotated = ca_names - annot_names
+        if len(unannotated) > 0:
+            raise UnannotatedAttributeError(
+                "The following `attr.ib`s lack a type annotation: "
+                + ", ".join(
+                    sorted(unannotated, key=lambda n: cd.get(n).counter)
+                )
+                + "."
+            )
     else:
-        ca_list = [(name, ca)
-                   for name, ca
-                   in iteritems(these)]
-
-    non_super_attrs = [
-        Attribute.from_counting_attr(name=attr_name, ca=ca)
-        for attr_name, ca
-        in sorted(ca_list, key=lambda e: e[1].counter)
+        ca_list = sorted(
+            (
+                (name, attr)
+                for name, attr in cd.items()
+                if isinstance(attr, _CountingAttr)
+            ),
+            key=lambda e: e[1].counter,
+        )
+
+    own_attrs = [
+        Attribute.from_counting_attr(
+            name=attr_name, ca=ca, type=anns.get(attr_name)
+        )
+        for attr_name, ca in ca_list
     ]
-    attr_names = [a.name for a in super_cls + non_super_attrs]
-
-    AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
-
-    cls.__attrs_attrs__ = AttrsClass(super_cls + [
-        Attribute.from_counting_attr(name=attr_name, ca=ca)
-        for attr_name, ca
-        in sorted(ca_list, key=lambda e: e[1].counter)
-    ])
-
+
+    if collect_by_mro:
+        base_attrs, base_attr_map = _collect_base_attrs(
+            cls, {a.name for a in own_attrs}
+        )
+    else:
+        base_attrs, base_attr_map = _collect_base_attrs_broken(
+            cls, {a.name for a in own_attrs}
+        )
+
+    if kw_only:
+        own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
+        base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
+
+    attrs = base_attrs + own_attrs
+
+    # Mandatory vs non-mandatory attr order only matters when they are part of
+    # the __init__ signature and when they aren't kw_only (which are moved to
+    # the end and can be mandatory or non-mandatory in any order, as they will
+    # be specified as keyword args anyway). Check the order of those attrs:
     had_default = False
-    for a in cls.__attrs_attrs__:
-        if these is None and a not in super_cls:
-            setattr(cls, a.name, a)
-        if had_default is True and a.default is NOTHING and a.init is True:
+    for a in (a for a in attrs if a.init is not False and a.kw_only is False):
+        if had_default is True and a.default is NOTHING:
             raise ValueError(
                 "No mandatory attributes allowed after an attribute with a "
-                "default value or factory.  Attribute in question: {a!r}"
-                .format(a=a)
+                "default value or factory.  Attribute in question: %r" % (a,)
             )
-        elif had_default is False and \
-                a.default is not NOTHING and \
-                a.init is not False:
+
+        if had_default is False and a.default is not NOTHING:
             had_default = True
 
-
-def _frozen_setattrs(self, name, value):
-    """
-    Attached to frozen classes as __setattr__.
-    """
-    raise FrozenInstanceError()
+    if field_transformer is not None:
+        attrs = field_transformer(cls, attrs)
+
+    # Create AttrsClass *after* applying the field_transformer since it may
+    # add or remove attributes!
+    attr_names = [a.name for a in attrs]
+    AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
+
+    return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map))
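+
+# auto_attribs sketch (class illustrative; assumes the public ``attr`` API):
+# annotated fields become attributes in definition order, assigned values
+# become defaults, and ClassVars are skipped.
+#
+#     import typing
+#
+#     @attr.s(auto_attribs=True)
+#     class C:
+#         x: int
+#         y: int = 7                     # like attr.ib(default=7)
+#         z: typing.ClassVar[int] = 3    # ignored by attrs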
+
+
+if PYPY:
+
+    def _frozen_setattrs(self, name, value):
+        """
+        Attached to frozen classes as __setattr__.
+        """
+        if isinstance(self, BaseException) and name in (
+            "__cause__",
+            "__context__",
+        ):
+            BaseException.__setattr__(self, name, value)
+            return
+
+        raise FrozenInstanceError()
+
+else:
+
+    def _frozen_setattrs(self, name, value):
+        """
+        Attached to frozen classes as __setattr__.
+        """
+        raise FrozenInstanceError()
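+
+# Frozen-class sketch (class illustrative; assumes the public ``attr`` API):
+#
+#     @attr.s(frozen=True)
+#     class F:
+#         x = attr.ib()
+#
+#     f = F(1)
+#     try:
+#         f.x = 2               # routed through _frozen_setattrs
+#     except FrozenInstanceError:
+#         pass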
 
 
 def _frozen_delattrs(self, name):
@@ -234,44 +624,661 @@
     raise FrozenInstanceError()
 
 
-def attributes(maybe_cls=None, these=None, repr_ns=None,
-               repr=True, cmp=True, hash=None, init=True,
-               slots=False, frozen=False, str=False):
+class _ClassBuilder:
+    """
+    Iteratively build *one* class.
+    """
+
+    __slots__ = (
+        "_attr_names",
+        "_attrs",
+        "_base_attr_map",
+        "_base_names",
+        "_cache_hash",
+        "_cls",
+        "_cls_dict",
+        "_delete_attribs",
+        "_frozen",
+        "_has_pre_init",
+        "_has_post_init",
+        "_is_exc",
+        "_on_setattr",
+        "_slots",
+        "_weakref_slot",
+        "_wrote_own_setattr",
+        "_has_custom_setattr",
+    )
+
+    def __init__(
+        self,
+        cls,
+        these,
+        slots,
+        frozen,
+        weakref_slot,
+        getstate_setstate,
+        auto_attribs,
+        kw_only,
+        cache_hash,
+        is_exc,
+        collect_by_mro,
+        on_setattr,
+        has_custom_setattr,
+        field_transformer,
+    ):
+        attrs, base_attrs, base_map = _transform_attrs(
+            cls,
+            these,
+            auto_attribs,
+            kw_only,
+            collect_by_mro,
+            field_transformer,
+        )
+
+        self._cls = cls
+        self._cls_dict = dict(cls.__dict__) if slots else {}
+        self._attrs = attrs
+        self._base_names = {a.name for a in base_attrs}
+        self._base_attr_map = base_map
+        self._attr_names = tuple(a.name for a in attrs)
+        self._slots = slots
+        self._frozen = frozen
+        self._weakref_slot = weakref_slot
+        self._cache_hash = cache_hash
+        self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
+        self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
+        self._delete_attribs = not bool(these)
+        self._is_exc = is_exc
+        self._on_setattr = on_setattr
+
+        self._has_custom_setattr = has_custom_setattr
+        self._wrote_own_setattr = False
+
+        self._cls_dict["__attrs_attrs__"] = self._attrs
+
+        if frozen:
+            self._cls_dict["__setattr__"] = _frozen_setattrs
+            self._cls_dict["__delattr__"] = _frozen_delattrs
+
+            self._wrote_own_setattr = True
+        elif on_setattr in (
+            _ng_default_on_setattr,
+            setters.validate,
+            setters.convert,
+        ):
+            has_validator = has_converter = False
+            for a in attrs:
+                if a.validator is not None:
+                    has_validator = True
+                if a.converter is not None:
+                    has_converter = True
+
+                if has_validator and has_converter:
+                    break
+            if (
+                (
+                    on_setattr == _ng_default_on_setattr
+                    and not (has_validator or has_converter)
+                )
+                or (on_setattr == setters.validate and not has_validator)
+                or (on_setattr == setters.convert and not has_converter)
+            ):
+                # If class-level on_setattr is set to convert + validate, but
+                # there's no field to convert or validate, pretend there's no
+                # on_setattr.
+                self._on_setattr = None
+
+        if getstate_setstate:
+            (
+                self._cls_dict["__getstate__"],
+                self._cls_dict["__setstate__"],
+            ) = self._make_getstate_setstate()
+
+    def __repr__(self):
+        return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__)
+
+    def build_class(self):
+        """
+        Finalize class based on the accumulated configuration.
+
+        Builder cannot be used after calling this method.
+        """
+        if self._slots is True:
+            return self._create_slots_class()
+        else:
+            return self._patch_original_class()
+
+    def _patch_original_class(self):
+        """
+        Apply accumulated methods and return the class.
+        """
+        cls = self._cls
+        base_names = self._base_names
+
+        # Clean class of attribute definitions (`attr.ib()`s).
+        if self._delete_attribs:
+            for name in self._attr_names:
+                if (
+                    name not in base_names
+                    and getattr(cls, name, _sentinel) is not _sentinel
+                ):
+                    try:
+                        delattr(cls, name)
+                    except AttributeError:
+                        # This can happen if a base class defines a class
+                        # variable and we want to set an attribute with the
+                        # same name by using only a type annotation.
+                        pass
+
+        # Attach our dunder methods.
+        for name, value in self._cls_dict.items():
+            setattr(cls, name, value)
+
+        # If we've inherited an attrs __setattr__ and don't write our own,
+        # reset it to object's.
+        if not self._wrote_own_setattr and getattr(
+            cls, "__attrs_own_setattr__", False
+        ):
+            cls.__attrs_own_setattr__ = False
+
+            if not self._has_custom_setattr:
+                cls.__setattr__ = _obj_setattr
+
+        return cls
+
+    def _create_slots_class(self):
+        """
+        Build and return a new class with a `__slots__` attribute.
+        """
+        cd = {
+            k: v
+            for k, v in self._cls_dict.items()
+            if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
+        }
+
+        # If our class doesn't have its own implementation of __setattr__
+        # (either from the user or by us), check the bases; if one of them has
+        # an attrs-made __setattr__, it needs to be reset.  We don't walk the
+        # MRO because we only care about our immediate base classes.
+        # XXX: This can be confused by subclassing a slotted attrs class with
+        # XXX: a non-attrs class and subclassing the resulting class with an
+        # XXX: attrs class.  See `test_slotted_confused` for details.  For now
+        # XXX: that's OK with us.
+        if not self._wrote_own_setattr:
+            cd["__attrs_own_setattr__"] = False
+
+            if not self._has_custom_setattr:
+                for base_cls in self._cls.__bases__:
+                    if base_cls.__dict__.get("__attrs_own_setattr__", False):
+                        cd["__setattr__"] = _obj_setattr
+                        break
+
+        # Traverse the MRO to collect existing slots
+        # and check for an existing __weakref__.
+        existing_slots = dict()
+        weakref_inherited = False
+        for base_cls in self._cls.__mro__[1:-1]:
+            if base_cls.__dict__.get("__weakref__", None) is not None:
+                weakref_inherited = True
+            existing_slots.update(
+                {
+                    name: getattr(base_cls, name)
+                    for name in getattr(base_cls, "__slots__", [])
+                }
+            )
+
+        base_names = set(self._base_names)
+
+        names = self._attr_names
+        if (
+            self._weakref_slot
+            and "__weakref__" not in getattr(self._cls, "__slots__", ())
+            and "__weakref__" not in names
+            and not weakref_inherited
+        ):
+            names += ("__weakref__",)
+
+        # We only add the names of attributes that aren't inherited.
+        # Setting __slots__ to inherited attributes wastes memory.
+        slot_names = [name for name in names if name not in base_names]
+        # There are slots for attributes from the current class that are
+        # already defined in parent classes.  As their descriptors may be
+        # overridden by a child class, we collect them here and update the
+        # class dict.
+        reused_slots = {
+            slot: slot_descriptor
+            for slot, slot_descriptor in existing_slots.items()
+            if slot in slot_names
+        }
+        slot_names = [name for name in slot_names if name not in reused_slots]
+        cd.update(reused_slots)
+        if self._cache_hash:
+            slot_names.append(_hash_cache_field)
+        cd["__slots__"] = tuple(slot_names)
+
+        cd["__qualname__"] = self._cls.__qualname__
+
+        # Create new class based on old class and our methods.
+        cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
+
+        # The following is a fix for
+        # <https://github.com/python-attrs/attrs/issues/102>.  On Python 3,
+        # if a method mentions `__class__` or uses the no-arg super(), the
+        # compiler will bake a reference to the class in the method itself
+        # as `method.__closure__`.  Since we replace the class with a
+        # clone, we rewrite these references so it keeps working.
+        for item in cls.__dict__.values():
+            if isinstance(item, (classmethod, staticmethod)):
+                # Class- and staticmethods hide their functions inside.
+                # These might need to be rewritten as well.
+                closure_cells = getattr(item.__func__, "__closure__", None)
+            elif isinstance(item, property):
+                # Workaround for property `super()` shortcut (PY3-only).
+                # There is no universal way for other descriptors.
+                closure_cells = getattr(item.fget, "__closure__", None)
+            else:
+                closure_cells = getattr(item, "__closure__", None)
+
+            if not closure_cells:  # Catch None or the empty list.
+                continue
+            for cell in closure_cells:
+                try:
+                    match = cell.cell_contents is self._cls
+                except ValueError:  # ValueError: Cell is empty
+                    pass
+                else:
+                    if match:
+                        set_closure_cell(cell, cls)
+
+        return cls
+
+    def add_repr(self, ns):
+        self._cls_dict["__repr__"] = self._add_method_dunders(
+            _make_repr(self._attrs, ns, self._cls)
+        )
+        return self
+
+    def add_str(self):
+        repr = self._cls_dict.get("__repr__")
+        if repr is None:
+            raise ValueError(
+                "__str__ can only be generated if a __repr__ exists."
+            )
+
+        def __str__(self):
+            return self.__repr__()
+
+        self._cls_dict["__str__"] = self._add_method_dunders(__str__)
+        return self
+
+    def _make_getstate_setstate(self):
+        """
+        Create custom __setstate__ and __getstate__ methods.
+        """
+        # __weakref__ is not writable.
+        state_attr_names = tuple(
+            an for an in self._attr_names if an != "__weakref__"
+        )
+
+        def slots_getstate(self):
+            """
+            Automatically created by attrs.
+            """
+            return tuple(getattr(self, name) for name in state_attr_names)
+
+        hash_caching_enabled = self._cache_hash
+
+        def slots_setstate(self, state):
+            """
+            Automatically created by attrs.
+            """
+            __bound_setattr = _obj_setattr.__get__(self, Attribute)
+            for name, value in zip(state_attr_names, state):
+                __bound_setattr(name, value)
+
+            # The hash code cache is not included when the object is
+            # serialized, but it still needs to be initialized to None to
+            # indicate that the first call to __hash__ should be a cache
+            # miss.
+            if hash_caching_enabled:
+                __bound_setattr(_hash_cache_field, None)
+
+        return slots_getstate, slots_setstate
+
+    def make_unhashable(self):
+        self._cls_dict["__hash__"] = None
+        return self
+
+    def add_hash(self):
+        self._cls_dict["__hash__"] = self._add_method_dunders(
+            _make_hash(
+                self._cls,
+                self._attrs,
+                frozen=self._frozen,
+                cache_hash=self._cache_hash,
+            )
+        )
+
+        return self
+
+    def add_init(self):
+        self._cls_dict["__init__"] = self._add_method_dunders(
+            _make_init(
+                self._cls,
+                self._attrs,
+                self._has_pre_init,
+                self._has_post_init,
+                self._frozen,
+                self._slots,
+                self._cache_hash,
+                self._base_attr_map,
+                self._is_exc,
+                self._on_setattr,
+                attrs_init=False,
+            )
+        )
+
+        return self
+
+    def add_match_args(self):
+        self._cls_dict["__match_args__"] = tuple(
+            field.name
+            for field in self._attrs
+            if field.init and not field.kw_only
+        )
+
+    def add_attrs_init(self):
+        self._cls_dict["__attrs_init__"] = self._add_method_dunders(
+            _make_init(
+                self._cls,
+                self._attrs,
+                self._has_pre_init,
+                self._has_post_init,
+                self._frozen,
+                self._slots,
+                self._cache_hash,
+                self._base_attr_map,
+                self._is_exc,
+                self._on_setattr,
+                attrs_init=True,
+            )
+        )
+
+        return self
+
+    def add_eq(self):
+        cd = self._cls_dict
+
+        cd["__eq__"] = self._add_method_dunders(
+            _make_eq(self._cls, self._attrs)
+        )
+        cd["__ne__"] = self._add_method_dunders(_make_ne())
+
+        return self
+
+    def add_order(self):
+        cd = self._cls_dict
+
+        cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
+            self._add_method_dunders(meth)
+            for meth in _make_order(self._cls, self._attrs)
+        )
+
+        return self
+
+    def add_setattr(self):
+        if self._frozen:
+            return self
+
+        sa_attrs = {}
+        for a in self._attrs:
+            on_setattr = a.on_setattr or self._on_setattr
+            if on_setattr and on_setattr is not setters.NO_OP:
+                sa_attrs[a.name] = a, on_setattr
+
+        if not sa_attrs:
+            return self
+
+        if self._has_custom_setattr:
+            # We need to write a __setattr__ but there already is one!
+            raise ValueError(
+                "Can't combine custom __setattr__ with on_setattr hooks."
+            )
+
+        # docstring comes from _add_method_dunders
+        def __setattr__(self, name, val):
+            try:
+                a, hook = sa_attrs[name]
+            except KeyError:
+                nval = val
+            else:
+                nval = hook(self, a, val)
+
+            _obj_setattr(self, name, nval)
+
+        self._cls_dict["__attrs_own_setattr__"] = True
+        self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
+        self._wrote_own_setattr = True
+
+        return self
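+
+    # on_setattr sketch (class illustrative; assumes the public ``attr`` API):
+    # once add_setattr() installs __setattr__, hooks re-run on each assignment.
+    #
+    #     @attr.s(on_setattr=attr.setters.validate)
+    #     class C:
+    #         x = attr.ib(validator=attr.validators.instance_of(int))
+    #
+    #     c = C(1)
+    #     c.x = 2        # validator re-runs via the hook
+    #     # c.x = "2"    # would raise TypeError from the validator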
+
+    def _add_method_dunders(self, method):
+        """
+        Add __module__ and __qualname__ to a *method* if possible.
+        """
+        try:
+            method.__module__ = self._cls.__module__
+        except AttributeError:
+            pass
+
+        try:
+            method.__qualname__ = ".".join(
+                (self._cls.__qualname__, method.__name__)
+            )
+        except AttributeError:
+            pass
+
+        try:
+            method.__doc__ = "Method generated by attrs for class %s." % (
+                self._cls.__qualname__,
+            )
+        except AttributeError:
+            pass
+
+        return method
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order.  If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        raise ValueError("Don't mix `cmp` with `eq' and `order`.")
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        return cmp, cmp
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq = default_eq
+
+    if order is None:
+        order = eq
+
+    if eq is False and order is True:
+        raise ValueError("`order` can only be True if `eq` is True too.")
+
+    return eq, order
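+
+# Resolution sketch (pure function; inputs illustrative):
+#
+#     assert _determine_attrs_eq_order(None, None, None, True) == (True, True)
+#     assert _determine_attrs_eq_order(False, None, None, True) == (False, False)
+#     # eq=False combined with order=True raises ValueError.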
+
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+    """
+    Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+    values of eq and order.  If *eq* is None, set it to *default_eq*.
+    """
+    if cmp is not None and any((eq is not None, order is not None)):
+        raise ValueError("Don't mix `cmp` with `eq' and `order`.")
+
+    def decide_callable_or_boolean(value):
+        """
+        Decide whether a key function is used.
+        """
+        if callable(value):
+            value, key = True, value
+        else:
+            key = None
+        return value, key
+
+    # cmp takes precedence due to bw-compatibility.
+    if cmp is not None:
+        cmp, cmp_key = decide_callable_or_boolean(cmp)
+        return cmp, cmp_key, cmp, cmp_key
+
+    # If left None, equality is set to the specified default and ordering
+    # mirrors equality.
+    if eq is None:
+        eq, eq_key = default_eq, None
+    else:
+        eq, eq_key = decide_callable_or_boolean(eq)
+
+    if order is None:
+        order, order_key = eq, eq_key
+    else:
+        order, order_key = decide_callable_or_boolean(order)
+
+    if eq is False and order is True:
+        raise ValueError("`order` can only be True if `eq` is True too.")
+
+    return eq, eq_key, order, order_key
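+
+# Callable-key sketch (pure function; inputs illustrative): a callable enables
+# the flag and is kept as the comparison key.
+#
+#     eq, eq_key, order, order_key = _determine_attrib_eq_order(
+#         None, str.lower, None, True
+#     )
+#     assert (eq, eq_key) == (True, str.lower)
+#     assert (order, order_key) == (True, str.lower)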
+
+
+def _determine_whether_to_implement(
+    cls, flag, auto_detect, dunders, default=True
+):
+    """
+    Check whether we should implement a set of methods for *cls*.
+
+    *flag* is the argument passed into @attr.s (e.g. 'init'), *auto_detect* is
+    the same as passed into @attr.s, and *dunders* is a tuple of attribute
+    names whose presence signals that the user has implemented the methods
+    themselves.
+
+    Return *default* if no reason either for or against is found.
+    """
+    if flag is True or flag is False:
+        return flag
+
+    if flag is None and auto_detect is False:
+        return default
+
+    # Logically, flag is None and auto_detect is True here.
+    for dunder in dunders:
+        if _has_own_attribute(cls, dunder):
+            return False
+
+    return default
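+
+# auto_detect sketch (class illustrative): a dunder defined on the class
+# itself, not inherited, suppresses generation of the matching methods.
+#
+#     class C:
+#         def __repr__(self):
+#             return "custom"
+#
+#     assert _determine_whether_to_implement(
+#         C, None, True, ("__repr__",)
+#     ) is False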
+
+
+def attrs(
+    maybe_cls=None,
+    these=None,
+    repr_ns=None,
+    repr=None,
+    cmp=None,
+    hash=None,
+    init=None,
+    slots=False,
+    frozen=False,
+    weakref_slot=True,
+    str=False,
+    auto_attribs=False,
+    kw_only=False,
+    cache_hash=False,
+    auto_exc=False,
+    eq=None,
+    order=None,
+    auto_detect=False,
+    collect_by_mro=False,
+    getstate_setstate=None,
+    on_setattr=None,
+    field_transformer=None,
+    match_args=True,
+):
     r"""
     A class decorator that adds `dunder
     <https://wiki.python.org/moin/DunderAlias>`_\ -methods according to the
-    specified attributes using :func:`attr.ib` or the *these* argument.
-
-    :param these: A dictionary of name to :func:`attr.ib` mappings.  This is
+    specified attributes using `attr.ib` or the *these* argument.
+
+    :param these: A dictionary of name to `attr.ib` mappings.  This is
         useful to avoid the definition of your attributes within the class body
         because you can't (e.g. if you want to add ``__repr__`` methods to
         Django models) or don't want to.
 
         If *these* is not ``None``, ``attrs`` will *not* search the class body
-        for attributes.
-
-    :type these: :class:`dict` of :class:`str` to :func:`attr.ib`
+        for attributes and will *not* remove any attributes from it.
+
+        If *these* is an ordered dict (`dict` on Python 3.6+,
+        `collections.OrderedDict` otherwise), the order is deduced from
+        the order of the attributes inside *these*.  Otherwise the order
+        of the definition of the attributes is used.
+
+    :type these: `dict` of `str` to `attr.ib`
 
     :param str repr_ns: When using nested classes, there's no way in Python 2
         to automatically detect that.  Therefore it's possible to set the
         namespace explicitly for a more meaningful ``repr`` output.
+    :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
+        *order*, and *hash* arguments explicitly, assume they are set to
+        ``True`` **unless any** of the involved methods for one of the
+        arguments is implemented in the *current* class (i.e. it is *not*
+        inherited from some base class).
+
+        So for example by implementing ``__eq__`` on a class yourself,
+        ``attrs`` will deduce ``eq=False`` and will create *neither*
+        ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
+        ``__ne__`` by default, so it *should* be enough to only implement
+        ``__eq__`` in most cases).
+
+        .. warning::
+
+           If you prevent ``attrs`` from creating the ordering methods for you
+           (``order=False``, e.g. by implementing ``__le__``), it becomes
+           *your* responsibility to make sure its ordering is sound. The best
+           way is to use the `functools.total_ordering` decorator.
+
+
+        Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
+        *cmp*, or *hash* overrides whatever *auto_detect* would determine.
+
+        *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises
+        an `attrs.exceptions.PythonTooOldError`.
+
     :param bool repr: Create a ``__repr__`` method with a human readable
-        represantation of ``attrs`` attributes..
+        representation of ``attrs`` attributes.
     :param bool str: Create a ``__str__`` method that is identical to
         ``__repr__``.  This is usually not necessary except for
-        :class:`Exception`\ s.
-    :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``,
-        ``__gt__``, and ``__ge__`` methods that compare the class as if it were
-        a tuple of its ``attrs`` attributes.  But the attributes are *only*
-        compared, if the type of both classes is *identical*!
-    :param hash: If ``None`` (default), the ``__hash__`` method is generated
-        according how *cmp* and *frozen* are set.
+        `Exception`\ s.
+    :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
+        and ``__ne__`` methods that check two instances for equality.
+
+        They compare the instances as if they were tuples of their ``attrs``
+        attributes if and only if the types of both classes are *identical*!
+    :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
+        ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
+        allow instances to be ordered. If ``None`` (default) mirror value of
+        *eq*.
+    :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
+        and *order* to the same value. Must not be mixed with *eq* or *order*.
+    :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method
+        is generated according to how *eq* and *frozen* are set.
 
         1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you.
-        2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to
+        2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
            None, marking it unhashable (which it is).
-        3. If *cmp* is False, ``__hash__`` will be left untouched meaning the
-           ``__hash__`` method of the superclass will be used (if superclass is
+        3. If *eq* is False, ``__hash__`` will be left untouched, meaning the
+           ``__hash__`` method of the base class will be used (if base class is
            ``object``, this means it will fall back to id-based hashing).
 
         Although not recommended, you can decide for yourself and force
@@ -279,29 +1286,37 @@
         didn't freeze it programmatically) by passing ``True`` or not.  Both of
         these cases are rather special and should be used carefully.
 
-        See the `Python documentation \
-        <https://docs.python.org/3/reference/datamodel.html#object.__hash__>`_
-        and the `GitHub issue that led to the default behavior \
-        <https://github.com/python-attrs/attrs/issues/136>`_ for more details.
-    :type hash: ``bool`` or ``None``
-    :param bool init: Create a ``__init__`` method that initialiazes the
-        ``attrs`` attributes.  Leading underscores are stripped for the
-        argument name.  If a ``__attrs_post_init__`` method exists on the
-        class, it will be called after the class is fully initialized.
-    :param bool slots: Create a slots_-style class that's more
-        memory-efficient.  See :ref:`slots` for further ramifications.
+        See our documentation on `hashing`, Python's documentation on
+        `object.__hash__`, and the `GitHub issue that led to the default \
+        behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
+        details.
+    :param bool init: Create a ``__init__`` method that initializes the
+        ``attrs`` attributes. Leading underscores are stripped for the argument
+        name. If a ``__attrs_pre_init__`` method exists on the class, it will
+        be called before the class is initialized. If a ``__attrs_post_init__``
+        method exists on the class, it will be called after the class is fully
+        initialized.
+
+        If ``init`` is ``False``, an ``__attrs_init__`` method will be
+        injected instead. This allows you to define a custom ``__init__``
+        method that can do pre-init work such as ``super().__init__()``,
+        and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
+    :param bool slots: Create a `slotted class <slotted classes>` that's more
+        memory-efficient. Slotted classes are generally superior to the default
+        dict classes, but have some gotchas you should know about, so we
+        encourage you to read the `glossary entry <slotted classes>`.
     :param bool frozen: Make instances immutable after initialization.  If
         someone attempts to modify a frozen instance,
-        :exc:`attr.exceptions.FrozenInstanceError` is raised.
-
-        Please note:
+        `attr.exceptions.FrozenInstanceError` is raised.
+
+        .. note::
 
             1. This is achieved by installing a custom ``__setattr__`` method
-               on your class so you can't implement an own one.
+               on your class, so you can't implement your own.
 
             2. True immutability is impossible in Python.
 
-            3. This *does* have a minor a runtime performance :ref:`impact
+            3. This *does* have a minor runtime performance `impact
                <how-frozen>` when initializing new instances.  In other words:
                ``__init__`` is slightly slower with ``frozen=True``.
 
@@ -310,316 +1325,651 @@
                circumvent that limitation by using
                ``object.__setattr__(self, "attribute_name", value)``.
 
-        ..  _slots: https://docs.python.org/3.5/reference/datamodel.html#slots
-
-    ..  versionadded:: 16.0.0 *slots*
-    ..  versionadded:: 16.1.0 *frozen*
-    ..  versionadded:: 16.3.0 *str*, and support for ``__attrs_post_init__``.
-    ..  versionchanged::
-            17.1.0 *hash* supports ``None`` as value which is also the default
-            now.
+            5. Subclasses of a frozen class are frozen too.
+
+    :param bool weakref_slot: Make instances weak-referenceable.  This has no
+        effect unless ``slots`` is also enabled.
+    :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
+        attributes (Python 3.6 and later only) from the class body.
+
+        In this case, you **must** annotate every field.  If ``attrs``
+        encounters a field that is set to an `attr.ib` but lacks a type
+        annotation, an `attr.exceptions.UnannotatedAttributeError` is
+        raised.  Use ``field_name: typing.Any = attr.ib(...)`` if you don't
+        want to set a type.
+
+        If you assign a value to those attributes (e.g. ``x: int = 42``), that
+        value becomes the default value as if it were passed using
+        ``attr.ib(default=42)``.  Passing an instance of `attrs.Factory` also
+        works as expected in most cases (see warning below).
+
+        Attributes annotated as `typing.ClassVar`, and attributes that are
+        neither annotated nor set to an `attr.ib` are **ignored**.
+
+        .. warning::
+           For features that use the attribute name to create decorators (e.g.
+           `validators <validators>`), you still *must* assign `attr.ib` to
+           them. Otherwise Python will either not find the name or try to use
+           the default value to call e.g. ``validator`` on it.
+
+           These errors can be quite confusing and are probably the most
+           common bug report on our bug tracker.
+
+    :param bool kw_only: Make all attributes keyword-only (Python 3+)
+        in the generated ``__init__`` (if ``init`` is ``False``, this
+        parameter is ignored).
+    :param bool cache_hash: Ensure that the object's hash code is computed
+        only once and stored on the object.  If this is set to ``True``,
+        hashing must be either explicitly or implicitly enabled for this
+        class.  If the hash code is cached, avoid any reassignments of
+        fields involved in hash code computation or mutations of the objects
+        those fields point to after object creation.  If such changes occur,
+        the behavior of the object's hash code is undefined.
+    :param bool auto_exc: If the class subclasses `BaseException`
+        (which implicitly includes any subclass of any exception), the
+        following happens to behave like a well-behaved Python exceptions
+        class:
+
+        - the values for *eq*, *order*, and *hash* are ignored and the
+          instances compare and hash by the instance's ids (N.B. ``attrs`` will
+          *not* remove existing implementations of ``__hash__`` or the equality
+          methods. It just won't add its own.),
+        - all attributes that are either passed into ``__init__`` or have a
+          default value are additionally available as a tuple in the ``args``
+          attribute,
+        - the value of *str* is ignored leaving ``__str__`` to base classes.
+    :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs``
+       collects attributes from base classes.  The default behavior is
+       incorrect in certain cases of multiple inheritance.  It should be on by
+       default but is kept off for backward-compatibility.
+
+       See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
+       more details.
+
+    :param Optional[bool] getstate_setstate:
+       .. note::
+          This is usually only interesting for slotted classes and you should
+          probably just set *auto_detect* to `True`.
+
+       If `True`, ``__getstate__`` and
+       ``__setstate__`` are generated and attached to the class. This is
+       necessary for slotted classes to be pickleable. If left `None`, it's
+       `True` by default for slotted classes and ``False`` for dict classes.
+
+       If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
+       and **either** ``__getstate__`` or ``__setstate__`` is detected directly
+       on the class (i.e. not inherited), it is set to `False` (this is usually
+       what you want).
+
+    :param on_setattr: A callable that is run whenever the user attempts to set
+        an attribute (either by assignment like ``i.x = 42`` or by using
+        `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
+        as validators: the instance, the attribute that is being modified, and
+        the new value.
+
+        If no exception is raised, the attribute is set to the return value of
+        the callable.
+
+        If a list of callables is passed, they're automatically wrapped in an
+        `attrs.setters.pipe`.
+    :type on_setattr: `callable`, or a list of callables, or `None`, or
+        `attrs.setters.NO_OP`
+
+    :param Optional[callable] field_transformer:
+        A function that is called with the original class object and all
+        fields right before ``attrs`` finalizes the class.  You can use
+        this, e.g., to automatically add converters or validators to
+        fields based on their types.  See `transform-fields` for more details.
+
+    :param bool match_args:
+        If `True` (default), set ``__match_args__`` on the class to support
+        :pep:`634` (Structural Pattern Matching). It is a tuple of all
+        non-keyword-only ``__init__`` parameter names on Python 3.10 and later.
+        Ignored on older Python versions.
+
+    .. versionadded:: 16.0.0 *slots*
+    .. versionadded:: 16.1.0 *frozen*
+    .. versionadded:: 16.3.0 *str*
+    .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+    .. versionchanged:: 17.1.0
+       *hash* supports ``None`` as value which is also the default now.
+    .. versionadded:: 17.3.0 *auto_attribs*
+    .. versionchanged:: 18.1.0
+       If *these* is passed, no attributes are deleted from the class body.
+    .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+    .. versionadded:: 18.2.0 *weakref_slot*
+    .. deprecated:: 18.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+       `DeprecationWarning` if the classes compared are subclasses of
+       each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
+       to each other.
+    .. versionchanged:: 19.2.0
+       ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+       subclasses comparable anymore.
+    .. versionadded:: 18.2.0 *kw_only*
+    .. versionadded:: 18.2.0 *cache_hash*
+    .. versionadded:: 19.1.0 *auto_exc*
+    .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+    .. versionadded:: 19.2.0 *eq* and *order*
+    .. versionadded:: 20.1.0 *auto_detect*
+    .. versionadded:: 20.1.0 *collect_by_mro*
+    .. versionadded:: 20.1.0 *getstate_setstate*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionadded:: 20.3.0 *field_transformer*
+    .. versionchanged:: 21.1.0
+       ``init=False`` injects ``__attrs_init__``
+    .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+    .. versionchanged:: 21.1.0 *cmp* undeprecated
+    .. versionadded:: 21.3.0 *match_args*
     """
+    eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
+    hash_ = hash  # work around the lack of nonlocal
+
+    if isinstance(on_setattr, (list, tuple)):
+        on_setattr = setters.pipe(*on_setattr)
+
     def wrap(cls):
-        if getattr(cls, "__class__", None) is None:
-            raise TypeError("attrs only works with new-style classes.")
-
-        if repr is False and str is True:
-            raise ValueError(
-                "__str__ can only be generated if a __repr__ exists."
-            )
-
-        if slots:
-            # Only need this later if we're using slots.
-            if these is None:
-                ca_list = [name
-                           for name, attr
-                           in cls.__dict__.items()
-                           if isinstance(attr, _CountingAttr)]
-            else:
-                ca_list = list(iterkeys(these))
-        _transform_attrs(cls, these)
-
-        # Can't just re-use frozen name because Python's scoping. :(
-        # Can't compare function objects because Python 2 is terrible. :(
-        effectively_frozen = _has_frozen_superclass(cls) or frozen
-        if repr is True:
-            cls = _add_repr(cls, ns=repr_ns)
+        is_frozen = frozen or _has_frozen_base_class(cls)
+        is_exc = auto_exc is True and issubclass(cls, BaseException)
+        has_own_setattr = auto_detect and _has_own_attribute(
+            cls, "__setattr__"
+        )
+
+        if has_own_setattr and is_frozen:
+            raise ValueError("Can't freeze a class with a custom __setattr__.")
+
+        builder = _ClassBuilder(
+            cls,
+            these,
+            slots,
+            is_frozen,
+            weakref_slot,
+            _determine_whether_to_implement(
+                cls,
+                getstate_setstate,
+                auto_detect,
+                ("__getstate__", "__setstate__"),
+                default=slots,
+            ),
+            auto_attribs,
+            kw_only,
+            cache_hash,
+            is_exc,
+            collect_by_mro,
+            on_setattr,
+            has_own_setattr,
+            field_transformer,
+        )
+        if _determine_whether_to_implement(
+            cls, repr, auto_detect, ("__repr__",)
+        ):
+            builder.add_repr(repr_ns)
         if str is True:
-            cls.__str__ = cls.__repr__
-        if cmp is True:
-            cls = _add_cmp(cls)
-
+            builder.add_str()
+
+        eq = _determine_whether_to_implement(
+            cls, eq_, auto_detect, ("__eq__", "__ne__")
+        )
+        if not is_exc and eq is True:
+            builder.add_eq()
+        if not is_exc and _determine_whether_to_implement(
+            cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
+        ):
+            builder.add_order()
+
+        builder.add_setattr()
+
+        if (
+            hash_ is None
+            and auto_detect is True
+            and _has_own_attribute(cls, "__hash__")
+        ):
+            hash = False
+        else:
+            hash = hash_
         if hash is not True and hash is not False and hash is not None:
+            # Can't use `hash in (True, False, None)` because 1 == True,
+            # for example.
             raise TypeError(
                 "Invalid value for hash.  Must be True, False, or None."
             )
-        elif hash is False or (hash is None and cmp is False):
-            pass
-        elif hash is True or (hash is None and cmp is True and frozen is True):
-            cls = _add_hash(cls)
+        elif hash is False or (hash is None and eq is False) or is_exc:
+            # Don't do anything. Should fall back to __object__'s __hash__
+            # which is by id.
+            if cache_hash:
+                raise TypeError(
+                    "Invalid value for cache_hash.  To use hash caching,"
+                    " hashing must be either explicitly or implicitly "
+                    "enabled."
+                )
+        elif hash is True or (
+            hash is None and eq is True and is_frozen is True
+        ):
+            # Build a __hash__ if told so, or if it's safe.
+            builder.add_hash()
         else:
-            cls.__hash__ = None
-
-        if init is True:
-            cls = _add_init(cls, effectively_frozen)
-        if effectively_frozen is True:
-            cls.__setattr__ = _frozen_setattrs
-            cls.__delattr__ = _frozen_delattrs
-            if slots is True:
-                # slots and frozen require __getstate__/__setstate__ to work
-                cls = _add_pickle(cls)
-        if slots is True:
-            cls_dict = dict(cls.__dict__)
-            cls_dict["__slots__"] = tuple(ca_list)
-            for ca_name in ca_list:
-                # It might not actually be in there, e.g. if using 'these'.
-                cls_dict.pop(ca_name, None)
-            cls_dict.pop("__dict__", None)
-
-            qualname = getattr(cls, "__qualname__", None)
-            cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
-            if qualname is not None:
-                cls.__qualname__ = qualname
-
-        return cls
-
-    # attrs_or class type depends on the usage of the decorator.  It's a class
-    # if it's used as `@attributes` but ``None`` if used # as `@attributes()`.
+            # Raise TypeError on attempts to hash.
+            if cache_hash:
+                raise TypeError(
+                    "Invalid value for cache_hash.  To use hash caching,"
+                    " hashing must be either explicitly or implicitly "
+                    "enabled."
+                )
+            builder.make_unhashable()
+
+        if _determine_whether_to_implement(
+            cls, init, auto_detect, ("__init__",)
+        ):
+            builder.add_init()
+        else:
+            builder.add_attrs_init()
+            if cache_hash:
+                raise TypeError(
+                    "Invalid value for cache_hash.  To use hash caching,"
+                    " init must be True."
+                )
+
+        if (
+            PY310
+            and match_args
+            and not _has_own_attribute(cls, "__match_args__")
+        ):
+            builder.add_match_args()
+
+        return builder.build_class()
+
+    # maybe_cls's type depends on the usage of the decorator.  It's a class
+    # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
     if maybe_cls is None:
         return wrap
     else:
         return wrap(maybe_cls)
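To illustrate the parameters documented above, a minimal usage sketch exercising the frozen/hash branch (class name and values are invented; assumes the library is importable as ``attr``):

    import attr

    @attr.s(frozen=True, slots=True, cache_hash=True)
    class Point:
        x = attr.ib()
        y = attr.ib()

    p = Point(1, 2)
    # eq=True plus frozen=True makes hash generation implicit; cache_hash
    # stores the result in a slot on first use.
    assert hash(p) == hash(Point(1, 2))
    # Frozen classes reject mutation with FrozenInstanceError.
    try:
        p.x = 3
    except attr.exceptions.FrozenInstanceError:
        pass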
 
 
-if PY2:
-    def _has_frozen_superclass(cls):
-        """
-        Check whether *cls* has a frozen ancestor by looking at its
-        __setattr__.
-        """
-        return (
-            getattr(
-                cls.__setattr__, "__module__", None
-            ) == _frozen_setattrs.__module__ and
-            cls.__setattr__.__name__ == _frozen_setattrs.__name__
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+def _has_frozen_base_class(cls):
+    """
+    Check whether *cls* has a frozen ancestor by looking at its
+    __setattr__.
+    """
+    return cls.__setattr__ is _frozen_setattrs
+
+
+def _generate_unique_filename(cls, func_name):
+    """
+    Create a "filename" suitable for a function being generated.
+    """
+    unique_filename = "<attrs generated {} {}.{}>".format(
+        func_name,
+        cls.__module__,
+        getattr(cls, "__qualname__", cls.__name__),
+    )
+    return unique_filename
+
+
+def _make_hash(cls, attrs, frozen, cache_hash):
+    attrs = tuple(
+        a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+    )
+
+    tab = "        "
+
+    unique_filename = _generate_unique_filename(cls, "hash")
+    type_hash = hash(unique_filename)
+    # If eq is custom generated, we need to include the functions in globs
+    globs = {}
+
+    hash_def = "def __hash__(self"
+    hash_func = "hash(("
+    closing_braces = "))"
+    if not cache_hash:
+        hash_def += "):"
+    else:
+        hash_def += ", *"
+
+        hash_def += (
+            ", _cache_wrapper="
+            + "__import__('attr._make')._make._CacheHashWrapper):"
         )
-else:
-    def _has_frozen_superclass(cls):
+        hash_func = "_cache_wrapper(" + hash_func
+        closing_braces += ")"
+
+    method_lines = [hash_def]
+
+    def append_hash_computation_lines(prefix, indent):
         """
-        Check whether *cls* has a frozen ancestor by looking at its
-        __setattr__.
+        Generate the code for actually computing the hash code.
+        Below, this is either returned directly or used to compute a value
+        that is then cached, depending on the value of *cache_hash*.
         """
-        return cls.__setattr__ == _frozen_setattrs
-
-
-def _attrs_to_tuple(obj, attrs):
-    """
-    Create a tuple of all values of *obj*'s *attrs*.
-    """
-    return tuple(getattr(obj, a.name) for a in attrs)
-
-
-def _add_hash(cls, attrs=None):
+
+        method_lines.extend(
+            [
+                indent + prefix + hash_func,
+                indent + "        %d," % (type_hash,),
+            ]
+        )
+
+        for a in attrs:
+            if a.eq_key:
+                cmp_name = "_%s_key" % (a.name,)
+                globs[cmp_name] = a.eq_key
+                method_lines.append(
+                    indent + "        %s(self.%s)," % (cmp_name, a.name)
+                )
+            else:
+                method_lines.append(indent + "        self.%s," % a.name)
+
+        method_lines.append(indent + "    " + closing_braces)
+
+    if cache_hash:
+        method_lines.append(tab + "if self.%s is None:" % _hash_cache_field)
+        if frozen:
+            append_hash_computation_lines(
+                "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2
+            )
+            method_lines.append(tab * 2 + ")")  # close __setattr__
+        else:
+            append_hash_computation_lines(
+                "self.%s = " % _hash_cache_field, tab * 2
+            )
+        method_lines.append(tab + "return self.%s" % _hash_cache_field)
+    else:
+        append_hash_computation_lines("return ", tab)
+
+    script = "\n".join(method_lines)
+    return _make_method("__hash__", script, unique_filename, globs)
+
+
+def _add_hash(cls, attrs):
     """
     Add a hash method to *cls*.
     """
-    if attrs is None:
-        attrs = [a
-                 for a in cls.__attrs_attrs__
-                 if a.hash is True or (a.hash is None and a.cmp is True)]
-
-    def hash_(self):
+    cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
+    return cls
+
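The generated method hashes a tuple whose first element is a per-class salt (the hash of the unique filename), so otherwise identical classes produce distinct hashes. A sketch of what ``_make_hash`` emits for fields ``x`` and ``y`` without *cache_hash* (the salt value is illustrative):

    def __hash__(self):
        return hash((
            105296437,  # per-class salt derived from the generated filename
            self.x,
            self.y,
        ))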
+
+def _make_ne():
+    """
+    Create __ne__ method.
+    """
+
+    def __ne__(self, other):
         """
-        Automatically created by attrs.
+        Check equality and either forward a NotImplemented or
+        return the result negated.
         """
-        return hash(_attrs_to_tuple(self, attrs))
-
-    cls.__hash__ = hash_
-    return cls
-
-
-def _add_cmp(cls, attrs=None):
+        result = self.__eq__(other)
+        if result is NotImplemented:
+            return NotImplemented
+
+        return not result
+
+    return __ne__
+
+
+def _make_eq(cls, attrs):
+    """
+    Create __eq__ method for *cls* with *attrs*.
     """
-    Add comparison methods to *cls*.
+    attrs = [a for a in attrs if a.eq]
+
+    unique_filename = _generate_unique_filename(cls, "eq")
+    lines = [
+        "def __eq__(self, other):",
+        "    if other.__class__ is not self.__class__:",
+        "        return NotImplemented",
+    ]
+
+    # We can't just do a big `self.x == other.x and ...` clause, due to
+    # irregularities like nan == nan being False while (nan,) == (nan,)
+    # is True.
+    globs = {}
+    if attrs:
+        lines.append("    return  (")
+        others = ["    ) == ("]
+        for a in attrs:
+            if a.eq_key:
+                cmp_name = "_%s_key" % (a.name,)
+                # Add the key function to the global namespace
+                # of the evaluated function.
+                globs[cmp_name] = a.eq_key
+                lines.append(
+                    "        %s(self.%s),"
+                    % (
+                        cmp_name,
+                        a.name,
+                    )
+                )
+                others.append(
+                    "        %s(other.%s),"
+                    % (
+                        cmp_name,
+                        a.name,
+                    )
+                )
+            else:
+                lines.append("        self.%s," % (a.name,))
+                others.append("        other.%s," % (a.name,))
+
+        lines += others + ["    )"]
+    else:
+        lines.append("    return True")
+
+    script = "\n".join(lines)
+
+    return _make_method("__eq__", script, unique_filename, globs)
+
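The tuple-based body generated above exists because of the NaN irregularity noted in the comment; a quick worked example:

    nan = float("nan")
    assert not (nan == nan)   # IEEE 754: NaN compares unequal to itself
    assert (nan,) == (nan,)   # tuple comparison checks identity first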
+
+def _make_order(cls, attrs):
     """
-    if attrs is None:
-        attrs = [a for a in cls.__attrs_attrs__ if a.cmp]
+    Create ordering methods for *cls* with *attrs*.
+    """
+    attrs = [a for a in attrs if a.order]
 
     def attrs_to_tuple(obj):
         """
         Save us some typing.
         """
-        return _attrs_to_tuple(obj, attrs)
-
-    def eq(self, other):
+        return tuple(
+            key(value) if key else value
+            for value, key in (
+                (getattr(obj, a.name), a.order_key) for a in attrs
+            )
+        )
+
+    def __lt__(self, other):
+        """
+        Automatically created by attrs.
+        """
+        if other.__class__ is self.__class__:
+            return attrs_to_tuple(self) < attrs_to_tuple(other)
+
+        return NotImplemented
+
+    def __le__(self, other):
+        """
+        Automatically created by attrs.
+        """
+        if other.__class__ is self.__class__:
+            return attrs_to_tuple(self) <= attrs_to_tuple(other)
+
+        return NotImplemented
+
+    def __gt__(self, other):
+        """
+        Automatically created by attrs.
+        """
+        if other.__class__ is self.__class__:
+            return attrs_to_tuple(self) > attrs_to_tuple(other)
+
+        return NotImplemented
+
+    def __ge__(self, other):
         """
         Automatically created by attrs.
         """
         if other.__class__ is self.__class__:
-            return attrs_to_tuple(self) == attrs_to_tuple(other)
-        else:
-            return NotImplemented
-
-    def ne(self, other):
-        """
-        Automatically created by attrs.
-        """
-        result = eq(self, other)
-        if result is NotImplemented:
-            return NotImplemented
-        else:
-            return not result
-
-    def lt(self, other):
-        """
-        Automatically created by attrs.
-        """
-        if isinstance(other, self.__class__):
-            return attrs_to_tuple(self) < attrs_to_tuple(other)
-        else:
-            return NotImplemented
-
-    def le(self, other):
-        """
-        Automatically created by attrs.
-        """
-        if isinstance(other, self.__class__):
-            return attrs_to_tuple(self) <= attrs_to_tuple(other)
-        else:
-            return NotImplemented
-
-    def gt(self, other):
-        """
-        Automatically created by attrs.
-        """
-        if isinstance(other, self.__class__):
-            return attrs_to_tuple(self) > attrs_to_tuple(other)
-        else:
-            return NotImplemented
-
-    def ge(self, other):
-        """
-        Automatically created by attrs.
-        """
-        if isinstance(other, self.__class__):
             return attrs_to_tuple(self) >= attrs_to_tuple(other)
-        else:
-            return NotImplemented
-
-    cls.__eq__ = eq
-    cls.__ne__ = ne
-    cls.__lt__ = lt
-    cls.__le__ = le
-    cls.__gt__ = gt
-    cls.__ge__ = ge
+
+        return NotImplemented
+
+    return __lt__, __le__, __gt__, __ge__
+
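A small sketch of *order_key* in use: passing a callable to ``attr.ib``'s ``order`` argument routes ordering through ``attrs_to_tuple`` with that key (class name invented; assumes the 21.1.0 semantics documented above):

    import attr

    @attr.s(order=True)
    class Version:
        label = attr.ib(order=str.lower)  # callable -> order=True + order_key

    assert Version("B") > Version("a")    # compares ("b",) > ("a",)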
+
+def _add_eq(cls, attrs=None):
+    """
+    Add equality methods to *cls* with *attrs*.
+    """
+    if attrs is None:
+        attrs = cls.__attrs_attrs__
+
+    cls.__eq__ = _make_eq(cls, attrs)
+    cls.__ne__ = _make_ne()
 
     return cls
 
 
+if HAS_F_STRINGS:
+
+    def _make_repr(attrs, ns, cls):
+        unique_filename = _generate_unique_filename(cls, "repr")
+        # Figure out which attributes to include, and which function to use to
+        # format them. The a.repr value can be either bool or a custom
+        # callable.
+        attr_names_with_reprs = tuple(
+            (a.name, (repr if a.repr is True else a.repr), a.init)
+            for a in attrs
+            if a.repr is not False
+        )
+        globs = {
+            name + "_repr": r
+            for name, r, _ in attr_names_with_reprs
+            if r != repr
+        }
+        globs["_compat"] = _compat
+        globs["AttributeError"] = AttributeError
+        globs["NOTHING"] = NOTHING
+        attribute_fragments = []
+        for name, r, i in attr_names_with_reprs:
+            accessor = (
+                "self." + name
+                if i
+                else 'getattr(self, "' + name + '", NOTHING)'
+            )
+            fragment = (
+                "%s={%s!r}" % (name, accessor)
+                if r == repr
+                else "%s={%s_repr(%s)}" % (name, name, accessor)
+            )
+            attribute_fragments.append(fragment)
+        repr_fragment = ", ".join(attribute_fragments)
+
+        if ns is None:
+            cls_name_fragment = (
+                '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}'
+            )
+        else:
+            cls_name_fragment = ns + ".{self.__class__.__name__}"
+
+        lines = [
+            "def __repr__(self):",
+            "  try:",
+            "    already_repring = _compat.repr_context.already_repring",
+            "  except AttributeError:",
+            "    already_repring = {id(self),}",
+            "    _compat.repr_context.already_repring = already_repring",
+            "  else:",
+            "    if id(self) in already_repring:",
+            "      return '...'",
+            "    else:",
+            "      already_repring.add(id(self))",
+            "  try:",
+            "    return f'%s(%s)'" % (cls_name_fragment, repr_fragment),
+            "  finally:",
+            "    already_repring.remove(id(self))",
+        ]
+
+        return _make_method(
+            "__repr__", "\n".join(lines), unique_filename, globs=globs
+        )
+
+else:
+
+    def _make_repr(attrs, ns, _):
+        """
+        Make a repr method that includes relevant *attrs*, adding *ns* to the
+        full name.
+        """
+
+        # Figure out which attributes to include, and which function to use to
+        # format them. The a.repr value can be either bool or a custom
+        # callable.
+        attr_names_with_reprs = tuple(
+            (a.name, repr if a.repr is True else a.repr)
+            for a in attrs
+            if a.repr is not False
+        )
+
+        def __repr__(self):
+            """
+            Automatically created by attrs.
+            """
+            try:
+                already_repring = _compat.repr_context.already_repring
+            except AttributeError:
+                already_repring = set()
+                _compat.repr_context.already_repring = already_repring
+
+            if id(self) in already_repring:
+                return "..."
+            real_cls = self.__class__
+            if ns is None:
+                class_name = real_cls.__qualname__.rsplit(">.", 1)[-1]
+            else:
+                class_name = ns + "." + real_cls.__name__
+
+            # Since 'self' remains on the stack (i.e.: strongly referenced)
+            # for the duration of this call, it's safe to depend on id(...)
+            # stability, and not need to track the instance and therefore
+            # worry about properties like weakref- or hash-ability.
+            already_repring.add(id(self))
+            try:
+                result = [class_name, "("]
+                first = True
+                for name, attr_repr in attr_names_with_reprs:
+                    if first:
+                        first = False
+                    else:
+                        result.append(", ")
+                    result.extend(
+                        (name, "=", attr_repr(getattr(self, name, NOTHING)))
+                    )
+                return "".join(result) + ")"
+            finally:
+                already_repring.remove(id(self))
+
+        return __repr__
+
+
 def _add_repr(cls, ns=None, attrs=None):
     """
     Add a repr method to *cls*.
     """
     if attrs is None:
-        attrs = [a for a in cls.__attrs_attrs__ if a.repr]
-
-    def repr_(self):
-        """
-        Automatically created by attrs.
-        """
-        real_cls = self.__class__
-        if ns is None:
-            qualname = getattr(real_cls, "__qualname__", None)
-            if qualname is not None:
-                class_name = qualname.rsplit(">.", 1)[-1]
-            else:
-                class_name = real_cls.__name__
-        else:
-            class_name = ns + "." + real_cls.__name__
-
-        return "{0}({1})".format(
-            class_name,
-            ", ".join(a.name + "=" + repr(getattr(self, a.name))
-                      for a in attrs)
-        )
-    cls.__repr__ = repr_
-    return cls
-
-
-def _add_init(cls, frozen):
-    """
-    Add a __init__ method to *cls*.  If *frozen* is True, make it immutable.
-    """
-    attrs = [a for a in cls.__attrs_attrs__
-             if a.init or a.default is not NOTHING]
-
-    # We cache the generated init methods for the same kinds of attributes.
-    sha1 = hashlib.sha1()
-    r = repr(attrs)
-    if not isinstance(r, bytes):
-        r = r.encode('utf-8')
-    sha1.update(r)
-    unique_filename = "<attrs generated init {0}>".format(
-        sha1.hexdigest()
-    )
-
-    script, globs = _attrs_to_script(
-        attrs,
-        frozen,
-        getattr(cls, "__attrs_post_init__", False),
-    )
-    locs = {}
-    bytecode = compile(script, unique_filename, "exec")
-    attr_dict = dict((a.name, a) for a in attrs)
-    globs.update({
-        "NOTHING": NOTHING,
-        "attr_dict": attr_dict,
-    })
-    if frozen is True:
-        # Save the lookup overhead in __init__ if we need to circumvent
-        # immutability.
-        globs["_cached_setattr"] = _obj_setattr
-    eval(bytecode, globs, locs)
-    init = locs["__init__"]
-
-    # In order of debuggers like PDB being able to step through the code,
-    # we add a fake linecache entry.
-    linecache.cache[unique_filename] = (
-        len(script),
-        None,
-        script.splitlines(True),
-        unique_filename
-    )
-    cls.__init__ = init
-    return cls
-
-
-def _add_pickle(cls):
-    """
-    Add pickle helpers, needed for frozen and slotted classes
-    """
-    def _slots_getstate__(obj):
-        """
-        Play nice with pickle.
-        """
-        return tuple(getattr(obj, a.name) for a in fields(obj.__class__))
-
-    def _slots_setstate__(obj, state):
-        """
-        Play nice with pickle.
-        """
-        __bound_setattr = _obj_setattr.__get__(obj, Attribute)
-        for a, value in zip(fields(obj.__class__), state):
-            __bound_setattr(a.name, value)
-
-    cls.__getstate__ = _slots_getstate__
-    cls.__setstate__ = _slots_setstate__
+        attrs = cls.__attrs_attrs__
+
+    cls.__repr__ = _make_repr(attrs, ns, cls)
     return cls
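The ``already_repring`` bookkeeping above is what stops self-referential instances from recursing; the observable behavior, as a sketch (class name invented):

    import attr

    @attr.s
    class Node:
        child = attr.ib(default=None)

    n = Node()
    n.child = n
    assert repr(n) == "Node(child=...)"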
 
 
 def fields(cls):
     """
-    Returns the tuple of ``attrs`` attributes for a class.
+    Return the tuple of ``attrs`` attributes for a class.
 
     The tuple also allows accessing the fields by their names (see below for
     examples).
@@ -630,12 +1980,12 @@
     :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
         class.
 
-    :rtype: tuple (with name accesors) of :class:`attr.Attribute`
+    :rtype: tuple (with name accessors) of `attrs.Attribute`
 
     ..  versionchanged:: 16.2.0 Returned tuple allows accessing the fields
         by name.
     """
-    if not isclass(cls):
+    if not isinstance(cls, type):
         raise TypeError("Passed object must be a class.")
     attrs = getattr(cls, "__attrs_attrs__", None)
     if attrs is None:
@@ -645,6 +1995,34 @@
     return attrs
 
 
+def fields_dict(cls):
+    """
+    Return an ordered dictionary of ``attrs`` attributes for a class, whose
+    keys are the attribute names.
+
+    :param type cls: Class to introspect.
+
+    :raise TypeError: If *cls* is not a class.
+    :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
+        class.
+
+    :rtype: an ordered dict where keys are attribute names and values are
+        `attrs.Attribute`\\ s. This will be a plain `dict` if it is
+        naturally ordered (i.e. on Python 3.6+), or an
+        :class:`~collections.OrderedDict` otherwise.
+
+    .. versionadded:: 18.1.0
+    """
+    if not isinstance(cls, type):
+        raise TypeError("Passed object must be a class.")
+    attrs = getattr(cls, "__attrs_attrs__", None)
+    if attrs is None:
+        raise NotAnAttrsClassError(
+            "{cls!r} is not an attrs-decorated class.".format(cls=cls)
+        )
+    return ordered_dict((a.name, a) for a in attrs)
+
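A short usage sketch contrasting the two accessors (class name invented):

    import attr

    @attr.s
    class C:
        x = attr.ib()
        y = attr.ib()

    assert [a.name for a in attr.fields(C)] == ["x", "y"]
    # The fields tuple also supports access by name, mirroring fields_dict:
    assert attr.fields(C).x is attr.fields_dict(C)["x"]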
+
 def validate(inst):
     """
     Validate all attributes on *inst* that have a validator.
@@ -662,240 +2040,623 @@
             v(inst, a, getattr(inst, a.name))
 
 
-def _attrs_to_script(attrs, frozen, post_init):
+def _is_slot_cls(cls):
+    return "__slots__" in cls.__dict__
+
+
+def _is_slot_attr(a_name, base_attr_map):
+    """
+    Check if the attribute name comes from a slot class.
+    """
+    return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
+
+
+def _make_init(
+    cls,
+    attrs,
+    pre_init,
+    post_init,
+    frozen,
+    slots,
+    cache_hash,
+    base_attr_map,
+    is_exc,
+    cls_on_setattr,
+    attrs_init,
+):
+    has_cls_on_setattr = (
+        cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP
+    )
+
+    if frozen and has_cls_on_setattr:
+        raise ValueError("Frozen classes can't use on_setattr.")
+
+    needs_cached_setattr = cache_hash or frozen
+    filtered_attrs = []
+    attr_dict = {}
+    for a in attrs:
+        if not a.init and a.default is NOTHING:
+            continue
+
+        filtered_attrs.append(a)
+        attr_dict[a.name] = a
+
+        if a.on_setattr is not None:
+            if frozen is True:
+                raise ValueError("Frozen classes can't use on_setattr.")
+
+            needs_cached_setattr = True
+        elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP:
+            needs_cached_setattr = True
+
+    unique_filename = _generate_unique_filename(cls, "init")
+
+    script, globs, annotations = _attrs_to_init_script(
+        filtered_attrs,
+        frozen,
+        slots,
+        pre_init,
+        post_init,
+        cache_hash,
+        base_attr_map,
+        is_exc,
+        has_cls_on_setattr,
+        attrs_init,
+    )
+    if cls.__module__ in sys.modules:
+        # This makes typing.get_type_hints(CLS.__init__) resolve string types.
+        globs.update(sys.modules[cls.__module__].__dict__)
+
+    globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
+
+    if needs_cached_setattr:
+        # Save the lookup overhead in __init__ if we need to circumvent
+        # setattr hooks.
+        globs["_setattr"] = _obj_setattr
+
+    init = _make_method(
+        "__attrs_init__" if attrs_init else "__init__",
+        script,
+        unique_filename,
+        globs,
+    )
+    init.__annotations__ = annotations
+
+    return init
+
+
+def _setattr(attr_name, value_var, has_on_setattr):
+    """
+    Use the cached object.setattr to set *attr_name* to *value_var*.
+    """
+    return "_setattr(self, '%s', %s)" % (attr_name, value_var)
+
+
+def _setattr_with_converter(attr_name, value_var, has_on_setattr):
+    """
+    Use the cached object.setattr to set *attr_name* to *value_var*, but run
+    its converter first.
+    """
+    return "_setattr(self, '%s', %s(%s))" % (
+        attr_name,
+        _init_converter_pat % (attr_name,),
+        value_var,
+    )
+
+
+def _assign(attr_name, value, has_on_setattr):
+    """
+    Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise
+    delegate to _setattr.
+    """
+    if has_on_setattr:
+        return _setattr(attr_name, value, True)
+
+    return "self.%s = %s" % (attr_name, value)
+
+
+def _assign_with_converter(attr_name, value_var, has_on_setattr):
+    """
+    Unless *attr_name* has an on_setattr hook, use normal assignment after
+    conversion. Otherwise delegate to _setattr_with_converter.
+    """
+    if has_on_setattr:
+        return _setattr_with_converter(attr_name, value_var, True)
+
+    return "self.%s = %s(%s)" % (
+        attr_name,
+        _init_converter_pat % (attr_name,),
+        value_var,
+    )
+
+
+def _attrs_to_init_script(
+    attrs,
+    frozen,
+    slots,
+    pre_init,
+    post_init,
+    cache_hash,
+    base_attr_map,
+    is_exc,
+    has_cls_on_setattr,
+    attrs_init,
+):
     """
     Return a script of an initializer for *attrs* and a dict of globals.
 
     The globals are expected by the generated script.
 
-     If *frozen* is True, we cannot set the attributes directly so we use
+    If *frozen* is True, we cannot set the attributes directly so we use
     a cached ``object.__setattr__``.
     """
     lines = []
+    if pre_init:
+        lines.append("self.__attrs_pre_init__()")
+
     if frozen is True:
-        lines.append(
-            # Circumvent the __setattr__ descriptor to save one lookup per
-            # assignment.
-            "_setattr = _cached_setattr.__get__(self, self.__class__)"
-        )
-
-        def fmt_setter(attr_name, value_var):
-            return "_setattr('%(attr_name)s', %(value_var)s)" % {
-                "attr_name": attr_name,
-                "value_var": value_var,
-            }
-
-        def fmt_setter_with_converter(attr_name, value_var):
-            conv_name = _init_convert_pat.format(attr_name)
-            return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % {
-                "attr_name": attr_name,
-                "value_var": value_var,
-                "conv": conv_name,
-            }
+        if slots is True:
+            fmt_setter = _setattr
+            fmt_setter_with_converter = _setattr_with_converter
+        else:
+            # Dict frozen classes assign directly to __dict__.
+            # But only if the attribute doesn't come from an ancestor slot
+            # class.
+            # Note _inst_dict will be used again below if cache_hash is True
+            lines.append("_inst_dict = self.__dict__")
+
+            def fmt_setter(attr_name, value_var, has_on_setattr):
+                if _is_slot_attr(attr_name, base_attr_map):
+                    return _setattr(attr_name, value_var, has_on_setattr)
+
+                return "_inst_dict['%s'] = %s" % (attr_name, value_var)
+
+            def fmt_setter_with_converter(
+                attr_name, value_var, has_on_setattr
+            ):
+                if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
+                    return _setattr_with_converter(
+                        attr_name, value_var, has_on_setattr
+                    )
+
+                return "_inst_dict['%s'] = %s(%s)" % (
+                    attr_name,
+                    _init_converter_pat % (attr_name,),
+                    value_var,
+                )
+
     else:
-        def fmt_setter(attr_name, value):
-            return "self.%(attr_name)s = %(value)s" % {
-                "attr_name": attr_name,
-                "value": value,
-            }
-
-        def fmt_setter_with_converter(attr_name, value_var):
-            conv_name = _init_convert_pat.format(attr_name)
-            return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % {
-                "attr_name": attr_name,
-                "value_var": value_var,
-                "conv": conv_name,
-            }
+        # Not frozen.
+        fmt_setter = _assign
+        fmt_setter_with_converter = _assign_with_converter
 
     args = []
+    kw_only_args = []
     attrs_to_validate = []
 
     # This is a dictionary of names to validator and converter callables.
     # Injecting this into __init__ globals lets us avoid lookups.
     names_for_globals = {}
+    annotations = {"return": None}
 
     for a in attrs:
         if a.validator:
             attrs_to_validate.append(a)
+
         attr_name = a.name
+        has_on_setattr = a.on_setattr is not None or (
+            a.on_setattr is not setters.NO_OP and has_cls_on_setattr
+        )
         arg_name = a.name.lstrip("_")
+
         has_factory = isinstance(a.default, Factory)
         if has_factory and a.default.takes_self:
             maybe_self = "self"
         else:
             maybe_self = ""
+
         if a.init is False:
             if has_factory:
                 init_factory_name = _init_factory_pat.format(a.name)
-                if a.convert is not None:
-                    lines.append(fmt_setter_with_converter(
-                        attr_name,
-                        init_factory_name + "({0})".format(maybe_self)))
-                    conv_name = _init_convert_pat.format(a.name)
-                    names_for_globals[conv_name] = a.convert
+                if a.converter is not None:
+                    lines.append(
+                        fmt_setter_with_converter(
+                            attr_name,
+                            init_factory_name + "(%s)" % (maybe_self,),
+                            has_on_setattr,
+                        )
+                    )
+                    conv_name = _init_converter_pat % (a.name,)
+                    names_for_globals[conv_name] = a.converter
                 else:
-                    lines.append(fmt_setter(
-                        attr_name,
-                        init_factory_name + "({0})".format(maybe_self)
-                    ))
+                    lines.append(
+                        fmt_setter(
+                            attr_name,
+                            init_factory_name + "(%s)" % (maybe_self,),
+                            has_on_setattr,
+                        )
+                    )
                 names_for_globals[init_factory_name] = a.default.factory
             else:
-                if a.convert is not None:
-                    lines.append(fmt_setter_with_converter(
-                        attr_name,
-                        "attr_dict['{attr_name}'].default"
-                        .format(attr_name=attr_name)
-                    ))
-                    conv_name = _init_convert_pat.format(a.name)
-                    names_for_globals[conv_name] = a.convert
+                if a.converter is not None:
+                    lines.append(
+                        fmt_setter_with_converter(
+                            attr_name,
+                            "attr_dict['%s'].default" % (attr_name,),
+                            has_on_setattr,
+                        )
+                    )
+                    conv_name = _init_converter_pat % (a.name,)
+                    names_for_globals[conv_name] = a.converter
                 else:
-                    lines.append(fmt_setter(
-                        attr_name,
-                        "attr_dict['{attr_name}'].default"
-                        .format(attr_name=attr_name)
-                    ))
+                    lines.append(
+                        fmt_setter(
+                            attr_name,
+                            "attr_dict['%s'].default" % (attr_name,),
+                            has_on_setattr,
+                        )
+                    )
         elif a.default is not NOTHING and not has_factory:
-            args.append(
-                "{arg_name}=attr_dict['{attr_name}'].default".format(
-                    arg_name=arg_name,
-                    attr_name=attr_name,
+            arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name)
+            if a.kw_only:
+                kw_only_args.append(arg)
+            else:
+                args.append(arg)
+
+            if a.converter is not None:
+                lines.append(
+                    fmt_setter_with_converter(
+                        attr_name, arg_name, has_on_setattr
+                    )
                 )
-            )
-            if a.convert is not None:
-                lines.append(fmt_setter_with_converter(attr_name, arg_name))
-                names_for_globals[_init_convert_pat.format(a.name)] = a.convert
+                names_for_globals[
+                    _init_converter_pat % (a.name,)
+                ] = a.converter
             else:
-                lines.append(fmt_setter(attr_name, arg_name))
+                lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
         elif has_factory:
-            args.append("{arg_name}=NOTHING".format(arg_name=arg_name))
-            lines.append("if {arg_name} is not NOTHING:"
-                         .format(arg_name=arg_name))
+            arg = "%s=NOTHING" % (arg_name,)
+            if a.kw_only:
+                kw_only_args.append(arg)
+            else:
+                args.append(arg)
+            lines.append("if %s is not NOTHING:" % (arg_name,))
+
             init_factory_name = _init_factory_pat.format(a.name)
-            if a.convert is not None:
-                lines.append("    " + fmt_setter_with_converter(attr_name,
-                                                                arg_name))
+            if a.converter is not None:
+                lines.append(
+                    "    "
+                    + fmt_setter_with_converter(
+                        attr_name, arg_name, has_on_setattr
+                    )
+                )
                 lines.append("else:")
-                lines.append("    " + fmt_setter_with_converter(
-                    attr_name,
-                    init_factory_name + "({0})".format(maybe_self)
-                ))
-                names_for_globals[_init_convert_pat.format(a.name)] = a.convert
+                lines.append(
+                    "    "
+                    + fmt_setter_with_converter(
+                        attr_name,
+                        init_factory_name + "(" + maybe_self + ")",
+                        has_on_setattr,
+                    )
+                )
+                names_for_globals[
+                    _init_converter_pat % (a.name,)
+                ] = a.converter
             else:
-                lines.append("    " + fmt_setter(attr_name, arg_name))
+                lines.append(
+                    "    " + fmt_setter(attr_name, arg_name, has_on_setattr)
+                )
                 lines.append("else:")
-                lines.append("    " + fmt_setter(
-                    attr_name,
-                    init_factory_name + "({0})".format(maybe_self)
-                ))
+                lines.append(
+                    "    "
+                    + fmt_setter(
+                        attr_name,
+                        init_factory_name + "(" + maybe_self + ")",
+                        has_on_setattr,
+                    )
+                )
             names_for_globals[init_factory_name] = a.default.factory
         else:
-            args.append(arg_name)
-            if a.convert is not None:
-                lines.append(fmt_setter_with_converter(attr_name, arg_name))
-                names_for_globals[_init_convert_pat.format(a.name)] = a.convert
+            if a.kw_only:
+                kw_only_args.append(arg_name)
             else:
-                lines.append(fmt_setter(attr_name, arg_name))
+                args.append(arg_name)
+
+            if a.converter is not None:
+                lines.append(
+                    fmt_setter_with_converter(
+                        attr_name, arg_name, has_on_setattr
+                    )
+                )
+                names_for_globals[
+                    _init_converter_pat % (a.name,)
+                ] = a.converter
+            else:
+                lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+        if a.init is True:
+            if a.type is not None and a.converter is None:
+                annotations[arg_name] = a.type
+            elif a.converter is not None:
+                # Try to get the type from the converter.
+                t = _AnnotationExtractor(a.converter).get_first_param_type()
+                if t:
+                    annotations[arg_name] = t
 
     if attrs_to_validate:  # we can skip this if there are no validators.
         names_for_globals["_config"] = _config
         lines.append("if _config._run_validators is True:")
         for a in attrs_to_validate:
-            val_name = "__attr_validator_{}".format(a.name)
-            attr_name = "__attr_{}".format(a.name)
-            lines.append("    {}(self, {}, self.{})".format(
-                val_name, attr_name, a.name))
+            val_name = "__attr_validator_" + a.name
+            attr_name = "__attr_" + a.name
+            lines.append(
+                "    %s(self, %s, self.%s)" % (val_name, attr_name, a.name)
+            )
             names_for_globals[val_name] = a.validator
             names_for_globals[attr_name] = a
+
     if post_init:
         lines.append("self.__attrs_post_init__()")
 
-    return """\
-def __init__(self, {args}):
+    # Because this is set only after __attrs_post_init__ is called, a crash
+    # will result if post-init tries to access the hash code.  This seemed
+    # preferable to setting this beforehand, in which case alteration to
+    # field values during post-init combined with post-init accessing the
+    # hash code would result in silent bugs.
+    if cache_hash:
+        if frozen:
+            if slots:
+                # if frozen and slots, then _setattr defined above
+                init_hash_cache = "_setattr(self, '%s', %s)"
+            else:
+                # if frozen and not slots, then _inst_dict defined above
+                init_hash_cache = "_inst_dict['%s'] = %s"
+        else:
+            init_hash_cache = "self.%s = %s"
+        lines.append(init_hash_cache % (_hash_cache_field, "None"))
+
+    # For exceptions we rely on BaseException.__init__ for proper
+    # initialization.
+    if is_exc:
+        vals = ",".join("self." + a.name for a in attrs if a.init)
+
+        lines.append("BaseException.__init__(self, %s)" % (vals,))
+
+    args = ", ".join(args)
+    if kw_only_args:
+        args += "%s*, %s" % (
+            ", " if args else "",  # leading comma
+            ", ".join(kw_only_args),  # kw_only args
+        )
+    return (
+        """\
+def {init_name}(self, {args}):
     {lines}
 """.format(
-        args=", ".join(args),
-        lines="\n    ".join(lines) if lines else "pass",
-    ), names_for_globals
-
-
-class Attribute(object):
+            init_name=("__attrs_init__" if attrs_init else "__init__"),
+            args=args,
+            lines="\n    ".join(lines) if lines else "pass",
+        ),
+        names_for_globals,
+        annotations,
+    )
+
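Because attrs registers the generated source with ``linecache`` (in ``_make_method``, not shown in this hunk), the emitted script can be inspected directly; a sketch for a defaulted field plus a keyword-only one (class name invented, printed output approximate):

    import attr
    import inspect

    @attr.s
    class C:
        x = attr.ib(default=0)
        y = attr.ib(kw_only=True)

    print(inspect.getsource(C.__init__))
    # def __init__(self, x=attr_dict['x'].default, *, y):
    #     self.x = x
    #     self.y = y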
+
+class Attribute:
     """
     *Read-only* representation of an attribute.
 
-    :attribute name: The name of the attribute.
-
-    Plus *all* arguments of :func:`attr.ib`.
+    The class has *all* arguments of `attr.ib` (except for ``factory``
+    which is only syntactic sugar for ``default=Factory(...)``) plus the
+    following:
+
+    - ``name`` (`str`): The name of the attribute.
+    - ``inherited`` (`bool`): Whether or not that attribute has been inherited
+      from a base class.
+    - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables
+      that are used for comparing and ordering objects by this attribute,
+      respectively. These are set by passing a callable to `attr.ib`'s ``eq``,
+      ``order``, or ``cmp`` arguments. See also :ref:`comparison customization
+      <custom-comparison>`.
+
+    Instances of this class are frequently used for introspection purposes
+    like:
+
+    - `fields` returns a tuple of them.
+    - Validators get them passed as the first argument.
+    - The :ref:`field transformer <transform-fields>` hook receives a list of
+      them.
+
+    .. versionadded:: 20.1.0 *inherited*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionchanged:: 20.2.0 *inherited* is not taken into account for
+        equality checks and hashing anymore.
+    .. versionadded:: 21.1.0 *eq_key* and *order_key*
+
+    For the full version history of the fields, see `attr.ib`.
     """
+
     __slots__ = (
-        "name", "default", "validator", "repr", "cmp", "hash", "init",
-        "convert", "metadata",
+        "name",
+        "default",
+        "validator",
+        "repr",
+        "eq",
+        "eq_key",
+        "order",
+        "order_key",
+        "hash",
+        "init",
+        "metadata",
+        "type",
+        "converter",
+        "kw_only",
+        "inherited",
+        "on_setattr",
     )
 
-    def __init__(self, name, default, validator, repr, cmp, hash, init,
-                 convert=None, metadata=None):
+    def __init__(
+        self,
+        name,
+        default,
+        validator,
+        repr,
+        cmp,  # XXX: unused, remove along with other cmp code.
+        hash,
+        init,
+        inherited,
+        metadata=None,
+        type=None,
+        converter=None,
+        kw_only=False,
+        eq=None,
+        eq_key=None,
+        order=None,
+        order_key=None,
+        on_setattr=None,
+    ):
+        eq, eq_key, order, order_key = _determine_attrib_eq_order(
+            cmp, eq_key or eq, order_key or order, True
+        )
+
         # Cache this descriptor here to speed things up later.
         bound_setattr = _obj_setattr.__get__(self, Attribute)
 
+        # Despite the big red warning, people *do* instantiate `Attribute`
+        # themselves.
         bound_setattr("name", name)
         bound_setattr("default", default)
         bound_setattr("validator", validator)
         bound_setattr("repr", repr)
-        bound_setattr("cmp", cmp)
+        bound_setattr("eq", eq)
+        bound_setattr("eq_key", eq_key)
+        bound_setattr("order", order)
+        bound_setattr("order_key", order_key)
         bound_setattr("hash", hash)
         bound_setattr("init", init)
-        bound_setattr("convert", convert)
-        bound_setattr("metadata", (metadata_proxy(metadata) if metadata
-                                   else _empty_metadata_singleton))
+        bound_setattr("converter", converter)
+        bound_setattr(
+            "metadata",
+            (
+                types.MappingProxyType(dict(metadata))  # Shallow copy
+                if metadata
+                else _empty_metadata_singleton
+            ),
+        )
+        bound_setattr("type", type)
+        bound_setattr("kw_only", kw_only)
+        bound_setattr("inherited", inherited)
+        bound_setattr("on_setattr", on_setattr)
 
     def __setattr__(self, name, value):
         raise FrozenInstanceError()
 
     @classmethod
-    def from_counting_attr(cls, name, ca):
+    def from_counting_attr(cls, name, ca, type=None):
+        # type holds the annotated value. Deal with conflicts:
+        if type is None:
+            type = ca.type
+        elif ca.type is not None:
+            raise ValueError(
+                "Type annotation and type argument cannot both be present"
+            )
         inst_dict = {
             k: getattr(ca, k)
-            for k
-            in Attribute.__slots__
-            if k not in (
-                "name", "validator", "default",
-            )  # exclude methods
+            for k in Attribute.__slots__
+            if k
+            not in (
+                "name",
+                "validator",
+                "default",
+                "type",
+                "inherited",
+            )  # exclude methods and deprecated alias
         }
-        return cls(name=name, validator=ca._validator, default=ca._default,
-                   **inst_dict)
+        return cls(
+            name=name,
+            validator=ca._validator,
+            default=ca._default,
+            type=type,
+            cmp=None,
+            inherited=False,
+            **inst_dict
+        )
+
+    # Don't use attr.evolve since fields(Attribute) doesn't work
+    def evolve(self, **changes):
+        """
+        Copy *self* and apply *changes*.
+
+        This works similarly to `attr.evolve` but that function does not work
+        with ``Attribute``.
+
+        It is mainly meant to be used for `transform-fields`.
+
+        .. versionadded:: 20.3.0
+        """
+        new = copy.copy(self)
+
+        new._setattrs(changes.items())
+
+        return new
 
     # Don't use _add_pickle since fields(Attribute) doesn't work
     def __getstate__(self):
         """
         Play nice with pickle.
         """
-        return tuple(getattr(self, name) if name != "metadata"
-                     else dict(self.metadata)
-                     for name in self.__slots__)
+        return tuple(
+            getattr(self, name) if name != "metadata" else dict(self.metadata)
+            for name in self.__slots__
+        )
 
     def __setstate__(self, state):
         """
         Play nice with pickle.
         """
+        self._setattrs(zip(self.__slots__, state))
+
+    def _setattrs(self, name_values_pairs):
         bound_setattr = _obj_setattr.__get__(self, Attribute)
-        for name, value in zip(self.__slots__, state):
+        for name, value in name_values_pairs:
             if name != "metadata":
                 bound_setattr(name, value)
             else:
-                bound_setattr(name, metadata_proxy(value) if value else
-                              _empty_metadata_singleton)
-
-
-_a = [Attribute(name=name, default=NOTHING, validator=None,
-                repr=True, cmp=True, hash=(name != "metadata"), init=True)
-      for name in Attribute.__slots__]
+                bound_setattr(
+                    name,
+                    types.MappingProxyType(dict(value))
+                    if value
+                    else _empty_metadata_singleton,
+                )
+
+
+_a = [
+    Attribute(
+        name=name,
+        default=NOTHING,
+        validator=None,
+        repr=True,
+        cmp=None,
+        eq=True,
+        order=False,
+        hash=(name != "metadata"),
+        init=True,
+        inherited=False,
+    )
+    for name in Attribute.__slots__
+]
 
 Attribute = _add_hash(
-    _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a),
-    attrs=[a for a in _a if a.hash]
+    _add_eq(
+        _add_repr(Attribute, attrs=_a),
+        attrs=[a for a in _a if a.name != "inherited"],
+    ),
+    attrs=[a for a in _a if a.hash and a.name != "inherited"],
 )
 
 
-class _CountingAttr(object):
+class _CountingAttr:
     """
     Intermediate representation of attributes that uses a counter to preserve
     the order in which the attributes have been defined.
@@ -903,35 +2664,105 @@
     *Internal* data structure of the attrs library.  Running into it is
     most likely the result of a bug like a forgotten `@attr.s` decorator.
     """
-    __slots__ = ("counter", "_default", "repr", "cmp", "hash", "init",
-                 "metadata", "_validator", "convert")
+
+    __slots__ = (
+        "counter",
+        "_default",
+        "repr",
+        "eq",
+        "eq_key",
+        "order",
+        "order_key",
+        "hash",
+        "init",
+        "metadata",
+        "_validator",
+        "converter",
+        "type",
+        "kw_only",
+        "on_setattr",
+    )
     __attrs_attrs__ = tuple(
-        Attribute(name=name, default=NOTHING, validator=None,
-                  repr=True, cmp=True, hash=True, init=True)
-        for name
-        in ("counter", "_default", "repr", "cmp", "hash", "init",)
+        Attribute(
+            name=name,
+            default=NOTHING,
+            validator=None,
+            repr=True,
+            cmp=None,
+            hash=True,
+            init=True,
+            kw_only=False,
+            eq=True,
+            eq_key=None,
+            order=False,
+            order_key=None,
+            inherited=False,
+            on_setattr=None,
+        )
+        for name in (
+            "counter",
+            "_default",
+            "repr",
+            "eq",
+            "order",
+            "hash",
+            "init",
+            "on_setattr",
+        )
     ) + (
-        Attribute(name="metadata", default=None, validator=None,
-                  repr=True, cmp=True, hash=False, init=True),
+        Attribute(
+            name="metadata",
+            default=None,
+            validator=None,
+            repr=True,
+            cmp=None,
+            hash=False,
+            init=True,
+            kw_only=False,
+            eq=True,
+            eq_key=None,
+            order=False,
+            order_key=None,
+            inherited=False,
+            on_setattr=None,
+        ),
     )
     cls_counter = 0
 
-    def __init__(self, default, validator, repr, cmp, hash, init, convert,
-                 metadata):
+    def __init__(
+        self,
+        default,
+        validator,
+        repr,
+        cmp,
+        hash,
+        init,
+        converter,
+        metadata,
+        type,
+        kw_only,
+        eq,
+        eq_key,
+        order,
+        order_key,
+        on_setattr,
+    ):
         _CountingAttr.cls_counter += 1
         self.counter = _CountingAttr.cls_counter
         self._default = default
-        # If validator is a list/tuple, wrap it using helper validator.
-        if validator and isinstance(validator, (list, tuple)):
-            self._validator = and_(*validator)
-        else:
-            self._validator = validator
+        self._validator = validator
+        self.converter = converter
         self.repr = repr
-        self.cmp = cmp
+        self.eq = eq
+        self.eq_key = eq_key
+        self.order = order
+        self.order_key = order_key
         self.hash = hash
         self.init = init
-        self.convert = convert
         self.metadata = metadata
+        self.type = type
+        self.kw_only = kw_only
+        self.on_setattr = on_setattr
 
     def validator(self, meth):
         """
@@ -965,15 +2796,14 @@
         return meth
 
 
-_CountingAttr = _add_cmp(_add_repr(_CountingAttr))
-
-
-@attributes(slots=True, init=False)
-class Factory(object):
+_CountingAttr = _add_eq(_add_repr(_CountingAttr))
+
+
+class Factory:
     """
     Stores a factory callable.
 
-    If passed as the default value to :func:`attr.ib`, the factory is used to
+    If passed as the default value to `attrs.field`, the factory is used to
     generate a new value.
 
     :param callable factory: A callable that takes either none or exactly one
@@ -983,8 +2813,8 @@
 
     .. versionadded:: 17.1.0  *takes_self*
     """
-    factory = attr()
-    takes_self = attr()
+
+    __slots__ = ("factory", "takes_self")
 
     def __init__(self, factory, takes_self=False):
         """
@@ -994,47 +2824,122 @@
         self.factory = factory
         self.takes_self = takes_self
 
+    def __getstate__(self):
+        """
+        Play nice with pickle.
+        """
+        return tuple(getattr(self, name) for name in self.__slots__)
+
+    def __setstate__(self, state):
+        """
+        Play nice with pickle.
+        """
+        for name, value in zip(self.__slots__, state):
+            setattr(self, name, value)
+
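A usage sketch of both ``Factory`` modes, plain and *takes_self* (class name invented):

    import attr

    @attr.s
    class C:
        x = attr.ib(default=attr.Factory(list))  # fresh list per instance
        y = attr.ib(
            default=attr.Factory(lambda self: len(self.x), takes_self=True)
        )

    c = C()
    assert c.x == [] and c.y == 0
    assert C().x is not c.x  # no shared mutable default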
+
+_f = [
+    Attribute(
+        name=name,
+        default=NOTHING,
+        validator=None,
+        repr=True,
+        cmp=None,
+        eq=True,
+        order=False,
+        hash=True,
+        init=True,
+        inherited=False,
+    )
+    for name in Factory.__slots__
+]
+
+Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
+
 
 def make_class(name, attrs, bases=(object,), **attributes_arguments):
     """
     A quick way to create a new class called *name* with *attrs*.
 
-    :param name: The name for the new class.
-    :type name: str
+    :param str name: The name for the new class.
 
     :param attrs: A list of names or a dictionary of mappings of names to
         attributes.
-    :type attrs: :class:`list` or :class:`dict`
+
+        If *attrs* is a list or an ordered dict (`dict` on Python 3.6+,
+        `collections.OrderedDict` otherwise), the order is deduced from
+        the order of the names or attributes inside *attrs*.  Otherwise the
+        order of the definition of the attributes is used.
+    :type attrs: `list` or `dict`
 
     :param tuple bases: Classes that the new class will subclass.
 
-    :param attributes_arguments: Passed unmodified to :func:`attr.s`.
+    :param attributes_arguments: Passed unmodified to `attr.s`.
 
     :return: A new class with *attrs*.
     :rtype: type
 
-    ..  versionadded:: 17.1.0 *bases*
+    .. versionadded:: 17.1.0 *bases*
+    .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
     """
     if isinstance(attrs, dict):
         cls_dict = attrs
     elif isinstance(attrs, (list, tuple)):
-        cls_dict = dict((a, attr()) for a in attrs)
+        cls_dict = {a: attrib() for a in attrs}
     else:
         raise TypeError("attrs argument must be a dict or a list.")
 
-    return attributes(**attributes_arguments)(type(name, bases, cls_dict))
-
-
-# These are required by whithin this module so we define them here and merely
-# import into .validators.
-
-
-@attributes(slots=True, hash=True)
-class _AndValidator(object):
+    pre_init = cls_dict.pop("__attrs_pre_init__", None)
+    post_init = cls_dict.pop("__attrs_post_init__", None)
+    user_init = cls_dict.pop("__init__", None)
+
+    body = {}
+    if pre_init is not None:
+        body["__attrs_pre_init__"] = pre_init
+    if post_init is not None:
+        body["__attrs_post_init__"] = post_init
+    if user_init is not None:
+        body["__init__"] = user_init
+
+    type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body))
+
+    # For pickling to work, the __module__ variable needs to be set to the
+    # frame where the class is created.  Bypass this step in environments where
+    # sys._getframe is not defined (Jython for example) or sys._getframe is not
+    # defined for arguments greater than 0 (IronPython).
+    try:
+        type_.__module__ = sys._getframe(1).f_globals.get(
+            "__name__", "__main__"
+        )
+    except (AttributeError, ValueError):
+        pass
+
+    # We do it here for proper warnings with meaningful stacklevel.
+    cmp = attributes_arguments.pop("cmp", None)
+    (
+        attributes_arguments["eq"],
+        attributes_arguments["order"],
+    ) = _determine_attrs_eq_order(
+        cmp,
+        attributes_arguments.get("eq"),
+        attributes_arguments.get("order"),
+        True,
+    )
+
+    return _attrs(these=cls_dict, **attributes_arguments)(type_)
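
A quick sketch of `make_class` in use; this assumes the vendored package imports as ``attr`` (as upstream attrs does), and the class and attribute names are invented:

    import attr

    # Extra keyword arguments are passed through to attr.s (here: frozen=True).
    Point = attr.make_class("Point", ["x", "y"], frozen=True)

    p = Point(1, 2)
    assert (p.x, p.y) == (1, 2)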
+
+
+# These are required within this module, so we define them here and merely
+# import into .validators / .converters.
+
+
+@attrs(slots=True, hash=True)
+class _AndValidator:
     """
     Compose many validators to a single one.
     """
-    _validators = attr()
+
+    _validators = attrib()
 
     def __call__(self, inst, attr, value):
         for v in self._validators:
@@ -1047,16 +2952,55 @@
 
     When called on a value, it runs all wrapped validators.
 
-    :param validators: Arbitrary number of validators.
-    :type validators: callables
+    :param callables validators: Arbitrary number of validators.
 
     .. versionadded:: 17.1.0
     """
     vals = []
     for validator in validators:
         vals.extend(
-            validator._validators if isinstance(validator, _AndValidator)
+            validator._validators
+            if isinstance(validator, _AndValidator)
             else [validator]
         )
 
     return _AndValidator(tuple(vals))
+
+
+def pipe(*converters):
+    """
+    A converter that composes multiple converters into one.
+
+    When called on a value, it runs all wrapped converters, returning the
+    *last* value.
+
+    Type annotations will be inferred from the wrapped converters'
+    annotations, if they have any.
+
+    :param callables converters: Arbitrary number of converters.
+
+    .. versionadded:: 20.1.0
+    """
+
+    def pipe_converter(val):
+        for converter in converters:
+            val = converter(val)
+
+        return val
+
+    if not converters:
+        # If the converter list is empty, pipe_converter is the identity.
+        A = typing.TypeVar("A")
+        pipe_converter.__annotations__ = {"val": A, "return": A}
+    else:
+        # Get parameter type from first converter.
+        t = _AnnotationExtractor(converters[0]).get_first_param_type()
+        if t:
+            pipe_converter.__annotations__["val"] = t
+
+        # Get return type from last converter.
+        rt = _AnnotationExtractor(converters[-1]).get_return_type()
+        if rt:
+            pipe_converter.__annotations__["return"] = rt
+
+    return pipe_converter
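
For illustration, a minimal sketch of composing converters with `pipe` (assumes the package imports as ``attr``; the ``Port`` class is hypothetical):

    import attr

    # pipe() runs the converters left to right and returns the last value.
    to_stripped_int = attr.converters.pipe(str.strip, int)

    @attr.s
    class Port:
        number = attr.ib(converter=to_stripped_int)

    assert Port(" 8080 ").number == 8080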
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/_next_gen.py	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,220 @@
+# SPDX-License-Identifier: MIT
+
+"""
+These are Python 3.6+, keyword-only APIs that call `attr.s` and
+`attr.ib` with different default values.
+"""
+
+
+from functools import partial
+
+from . import setters
+from ._funcs import asdict as _asdict
+from ._funcs import astuple as _astuple
+from ._make import (
+    NOTHING,
+    _frozen_setattrs,
+    _ng_default_on_setattr,
+    attrib,
+    attrs,
+)
+from .exceptions import UnannotatedAttributeError
+
+
+def define(
+    maybe_cls=None,
+    *,
+    these=None,
+    repr=None,
+    hash=None,
+    init=None,
+    slots=True,
+    frozen=False,
+    weakref_slot=True,
+    str=False,
+    auto_attribs=None,
+    kw_only=False,
+    cache_hash=False,
+    auto_exc=True,
+    eq=None,
+    order=False,
+    auto_detect=True,
+    getstate_setstate=None,
+    on_setattr=None,
+    field_transformer=None,
+    match_args=True,
+):
+    r"""
+    Define an ``attrs`` class.
+
+    Differences from the classic `attr.s` that it uses underneath:
+
+    - Automatically detect whether or not *auto_attribs* should be `True` (cf.
+      *auto_attribs* parameter).
+    - If *frozen* is `False`, run converters and validators when setting an
+      attribute by default.
+    - *slots=True*
+
+      .. caution::
+
+         Usually this has only upsides and few visible effects in everyday
+         programming. But it *can* lead to some surprising behaviors, so please
+         make sure to read :term:`slotted classes`.
+    - *auto_exc=True*
+    - *auto_detect=True*
+    - *order=False*
+    - Some options that were only relevant on Python 2 or were kept around for
+      backwards-compatibility have been removed.
+
+    Please note that these are all defaults and you can change them as you
+    wish.
+
+    :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
+       exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
+
+       1. If any attributes are annotated and no unannotated `attrs.fields`\ s
+          are found, it assumes *auto_attribs=True*.
+       2. Otherwise it assumes *auto_attribs=False* and tries to collect
+          `attrs.fields`\ s.
+
+    For now, please refer to `attr.s` for the rest of the parameters.
+
+    .. versionadded:: 20.1.0
+    .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
+    """
+
+    def do_it(cls, auto_attribs):
+        return attrs(
+            maybe_cls=cls,
+            these=these,
+            repr=repr,
+            hash=hash,
+            init=init,
+            slots=slots,
+            frozen=frozen,
+            weakref_slot=weakref_slot,
+            str=str,
+            auto_attribs=auto_attribs,
+            kw_only=kw_only,
+            cache_hash=cache_hash,
+            auto_exc=auto_exc,
+            eq=eq,
+            order=order,
+            auto_detect=auto_detect,
+            collect_by_mro=True,
+            getstate_setstate=getstate_setstate,
+            on_setattr=on_setattr,
+            field_transformer=field_transformer,
+            match_args=match_args,
+        )
+
+    def wrap(cls):
+        """
+        Making this a wrapper ensures this code runs during class creation.
+
+        We also ensure that frozen-ness of classes is inherited.
+        """
+        nonlocal frozen, on_setattr
+
+        had_on_setattr = on_setattr not in (None, setters.NO_OP)
+
+        # By default, mutable classes convert & validate on setattr.
+        if frozen is False and on_setattr is None:
+            on_setattr = _ng_default_on_setattr
+
+        # However, if we subclass a frozen class, we inherit the immutability
+        # and disable on_setattr.
+        for base_cls in cls.__bases__:
+            if base_cls.__setattr__ is _frozen_setattrs:
+                if had_on_setattr:
+                    raise ValueError(
+                        "Frozen classes can't use on_setattr "
+                        "(frozen-ness was inherited)."
+                    )
+
+                on_setattr = setters.NO_OP
+                break
+
+        if auto_attribs is not None:
+            return do_it(cls, auto_attribs)
+
+        try:
+            return do_it(cls, True)
+        except UnannotatedAttributeError:
+            return do_it(cls, False)
+
+    # maybe_cls's type depends on the usage of the decorator.  It's a class
+    # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
+    if maybe_cls is None:
+        return wrap
+    else:
+        return wrap(maybe_cls)
+
+
+mutable = define
+frozen = partial(define, frozen=True, on_setattr=None)
+
+
+def field(
+    *,
+    default=NOTHING,
+    validator=None,
+    repr=True,
+    hash=None,
+    init=True,
+    metadata=None,
+    converter=None,
+    factory=None,
+    kw_only=False,
+    eq=None,
+    order=None,
+    on_setattr=None,
+):
+    """
+    Identical to `attr.ib`, except keyword-only and with some arguments
+    removed.
+
+    .. versionadded:: 20.1.0
+    """
+    return attrib(
+        default=default,
+        validator=validator,
+        repr=repr,
+        hash=hash,
+        init=init,
+        metadata=metadata,
+        converter=converter,
+        factory=factory,
+        kw_only=kw_only,
+        eq=eq,
+        order=order,
+        on_setattr=on_setattr,
+    )
+
+
+def asdict(inst, *, recurse=True, filter=None, value_serializer=None):
+    """
+    Same as `attr.asdict`, except that collection types are always retained
+    and dict is always used as *dict_factory*.
+
+    .. versionadded:: 21.3.0
+    """
+    return _asdict(
+        inst=inst,
+        recurse=recurse,
+        filter=filter,
+        value_serializer=value_serializer,
+        retain_collection_types=True,
+    )
+
+
+def astuple(inst, *, recurse=True, filter=None):
+    """
+    Same as `attr.astuple`, except that collection types are always retained
+    and `tuple` is always used as the *tuple_factory*.
+
+    .. versionadded:: 21.3.0
+    """
+    return _astuple(
+        inst=inst, recurse=recurse, filter=filter, retain_collection_types=True
+    )
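
A short sketch of this next-generation surface (class and field names invented; assumes the vendored package exports `define`/`field` like upstream attrs):

    import attr

    @attr.define  # slots=True; auto_attribs is auto-detected
    class Server:
        host: str
        port: int = attr.field(
            default=8080, validator=attr.validators.instance_of(int)
        )

    s = Server("localhost")
    try:
        s.port = "oops"  # mutable define() classes validate on assignment
    except TypeError:
        pass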
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/_version_info.py	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+
+from functools import total_ordering
+
+from ._funcs import astuple
+from ._make import attrib, attrs
+
+
+@total_ordering
+@attrs(eq=False, order=False, slots=True, frozen=True)
+class VersionInfo:
+    """
+    A version object that can be compared to tuples of length 1--4:
+
+    >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
+    True
+    >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
+    True
+    >>> vi = attr.VersionInfo(19, 2, 0, "final")
+    >>> vi < (19, 1, 1)
+    False
+    >>> vi < (19,)
+    False
+    >>> vi == (19, 2,)
+    True
+    >>> vi == (19, 2, 1)
+    False
+
+    .. versionadded:: 19.2
+    """
+
+    year = attrib(type=int)
+    minor = attrib(type=int)
+    micro = attrib(type=int)
+    releaselevel = attrib(type=str)
+
+    @classmethod
+    def _from_version_string(cls, s):
+        """
+        Parse *s* and return a `VersionInfo`.
+        """
+        v = s.split(".")
+        if len(v) == 3:
+            v.append("final")
+
+        return cls(
+            year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
+        )
+
+    def _ensure_tuple(self, other):
+        """
+        Ensure *other* is a tuple of a valid length.
+
+        Returns a possibly transformed *other* and ourselves as a tuple of
+        the same length as *other*.
+        """
+
+        if self.__class__ is other.__class__:
+            other = astuple(other)
+
+        if not isinstance(other, tuple):
+            raise NotImplementedError
+
+        if not (1 <= len(other) <= 4):
+            raise NotImplementedError
+
+        return astuple(self)[: len(other)], other
+
+    def __eq__(self, other):
+        try:
+            us, them = self._ensure_tuple(other)
+        except NotImplementedError:
+            return NotImplemented
+
+        return us == them
+
+    def __lt__(self, other):
+        try:
+            us, them = self._ensure_tuple(other)
+        except NotImplementedError:
+            return NotImplemented
+
+        # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
+        # have to do anything special with releaselevel for now.
+        return us < them
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/_version_info.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,9 @@
+class VersionInfo:
+    @property
+    def year(self) -> int: ...
+    @property
+    def minor(self) -> int: ...
+    @property
+    def micro(self) -> int: ...
+    @property
+    def releaselevel(self) -> str: ...
--- a/mercurial/thirdparty/attr/converters.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/converters.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,8 +1,22 @@
+# SPDX-License-Identifier: MIT
+
 """
 Commonly useful converters.
 """
 
-from __future__ import absolute_import, division, print_function
+
+import typing
+
+from ._compat import _AnnotationExtractor
+from ._make import NOTHING, Factory, pipe
+
+
+__all__ = [
+    "default_if_none",
+    "optional",
+    "pipe",
+    "to_bool",
+]
 
 
 def optional(converter):
@@ -10,10 +24,13 @@
     A converter that allows an attribute to be optional. An optional attribute
     is one which can be set to ``None``.
 
+    Type annotations will be inferred from the wrapped converter's
+    annotations, if it has any.
+
     :param callable converter: the converter that is used for non-``None``
         values.
 
-    ..  versionadded:: 17.1.0
+    .. versionadded:: 17.1.0
     """
 
     def optional_converter(val):
@@ -21,4 +38,107 @@
             return None
         return converter(val)
 
+    xtr = _AnnotationExtractor(converter)
+
+    t = xtr.get_first_param_type()
+    if t:
+        optional_converter.__annotations__["val"] = typing.Optional[t]
+
+    rt = xtr.get_return_type()
+    if rt:
+        optional_converter.__annotations__["return"] = typing.Optional[rt]
+
     return optional_converter
+
+
+def default_if_none(default=NOTHING, factory=None):
+    """
+    A converter that replaces ``None`` values with *default* or the
+    result of *factory*.
+
+    :param default: Value to be used if ``None`` is passed. Passing an instance
+       of `attrs.Factory` is supported, however the ``takes_self`` option
+       is *not*.
+    :param callable factory: A callable that takes no parameters whose result
+       is used if ``None`` is passed.
+
+    :raises TypeError: If **neither** *default* **nor** *factory* is passed.
+    :raises TypeError: If **both** *default* and *factory* are passed.
+    :raises ValueError: If an instance of `attrs.Factory` is passed with
+       ``takes_self=True``.
+
+    .. versionadded:: 18.2.0
+    """
+    if default is NOTHING and factory is None:
+        raise TypeError("Must pass either `default` or `factory`.")
+
+    if default is not NOTHING and factory is not None:
+        raise TypeError(
+            "Must pass either `default` or `factory` but not both."
+        )
+
+    if factory is not None:
+        default = Factory(factory)
+
+    if isinstance(default, Factory):
+        if default.takes_self:
+            raise ValueError(
+                "`takes_self` is not supported by default_if_none."
+            )
+
+        def default_if_none_converter(val):
+            if val is not None:
+                return val
+
+            return default.factory()
+
+    else:
+
+        def default_if_none_converter(val):
+            if val is not None:
+                return val
+
+            return default
+
+    return default_if_none_converter
+
+
+def to_bool(val):
+    """
+    Convert "boolean" strings (e.g., from env. vars.) to real booleans.
+
+    Values mapping to :code:`True`:
+
+    - :code:`True`
+    - :code:`"true"` / :code:`"t"`
+    - :code:`"yes"` / :code:`"y"`
+    - :code:`"on"`
+    - :code:`"1"`
+    - :code:`1`
+
+    Values mapping to :code:`False`:
+
+    - :code:`False`
+    - :code:`"false"` / :code:`"f"`
+    - :code:`"no"` / :code:`"n"`
+    - :code:`"off"`
+    - :code:`"0"`
+    - :code:`0`
+
+    :raises ValueError: for any other value.
+
+    .. versionadded:: 21.3.0
+    """
+    if isinstance(val, str):
+        val = val.lower()
+    truthy = {True, "true", "t", "yes", "y", "on", "1", 1}
+    falsy = {False, "false", "f", "no", "n", "off", "0", 0}
+    try:
+        if val in truthy:
+            return True
+        if val in falsy:
+            return False
+    except TypeError:
+        # Raised when "val" is not hashable (e.g., lists)
+        pass
+    raise ValueError("Cannot convert value to bool: {}".format(val))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/converters.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,13 @@
+from typing import Callable, Optional, TypeVar, overload
+
+from . import _ConverterType
+
+_T = TypeVar("_T")
+
+def pipe(*validators: _ConverterType) -> _ConverterType: ...
+def optional(converter: _ConverterType) -> _ConverterType: ...
+@overload
+def default_if_none(default: _T) -> _ConverterType: ...
+@overload
+def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ...
+def to_bool(val: str) -> bool: ...
--- a/mercurial/thirdparty/attr/exceptions.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/exceptions.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,17 +1,35 @@
-from __future__ import absolute_import, division, print_function
+# SPDX-License-Identifier: MIT
 
 
-class FrozenInstanceError(AttributeError):
+class FrozenError(AttributeError):
     """
-    A frozen/immutable instance has been attempted to be modified.
+    An attempt has been made to modify a frozen/immutable instance or
+    attribute.
 
     It mirrors the behavior of ``namedtuples`` by using the same error message
-    and subclassing :exc:`AttributeError`.
+    and subclassing `AttributeError`.
+
+    .. versionadded:: 20.1.0
+    """
+
+    msg = "can't set attribute"
+    args = [msg]
+
+
+class FrozenInstanceError(FrozenError):
+    """
+    An attempt has been made to modify a frozen instance.
 
     .. versionadded:: 16.1.0
     """
-    msg = "can't set attribute"
-    args = [msg]
+
+
+class FrozenAttributeError(FrozenError):
+    """
+    An attempt has been made to modify a frozen attribute.
+
+    .. versionadded:: 20.1.0
+    """
 
 
 class AttrsAttributeNotFoundError(ValueError):
@@ -37,3 +55,38 @@
 
     .. versionadded:: 17.1.0
     """
+
+
+class UnannotatedAttributeError(RuntimeError):
+    """
+    A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
+    annotation.
+
+    .. versionadded:: 17.3.0
+    """
+
+
+class PythonTooOldError(RuntimeError):
+    """
+    An ``attrs`` feature that requires a newer Python version was used.
+
+    .. versionadded:: 18.2.0
+    """
+
+
+class NotCallableError(TypeError):
+    """
+    An ``attr.ib()`` requiring a callable has been set with a value
+    that is not callable.
+
+    .. versionadded:: 19.2.0
+    """
+
+    def __init__(self, msg, value):
+        super(TypeError, self).__init__(msg, value)
+        self.msg = msg
+        self.value = value
+
+    def __str__(self):
+        return str(self.msg)
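
For example, mutating a frozen instance raises `FrozenInstanceError`, which the new hierarchy keeps compatible with ``AttributeError`` (sketch; the class is invented):

    import attr

    @attr.s(frozen=True)
    class Point:
        x = attr.ib()

    try:
        Point(1).x = 2
    except attr.exceptions.FrozenInstanceError as e:
        # FrozenInstanceError -> FrozenError -> AttributeError
        assert isinstance(e, AttributeError)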
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/exceptions.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,17 @@
+from typing import Any
+
+class FrozenError(AttributeError):
+    msg: str = ...
+
+class FrozenInstanceError(FrozenError): ...
+class FrozenAttributeError(FrozenError): ...
+class AttrsAttributeNotFoundError(ValueError): ...
+class NotAnAttrsClassError(ValueError): ...
+class DefaultAlreadySetError(RuntimeError): ...
+class UnannotatedAttributeError(RuntimeError): ...
+class PythonTooOldError(RuntimeError): ...
+
+class NotCallableError(TypeError):
+    msg: str = ...
+    value: Any = ...
+    def __init__(self, msg: str, value: Any) -> None: ...
--- a/mercurial/thirdparty/attr/filters.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/filters.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,10 +1,9 @@
+# SPDX-License-Identifier: MIT
+
 """
-Commonly useful filters for :func:`attr.asdict`.
+Commonly useful filters for `attr.asdict`.
 """
 
-from __future__ import absolute_import, division, print_function
-
-from ._compat import isclass
 from ._make import Attribute
 
 
@@ -13,19 +12,19 @@
     Returns a tuple of `frozenset`s of classes and attributes.
     """
     return (
-        frozenset(cls for cls in what if isclass(cls)),
+        frozenset(cls for cls in what if isinstance(cls, type)),
         frozenset(cls for cls in what if isinstance(cls, Attribute)),
     )
 
 
 def include(*what):
-    r"""
-    Whitelist *what*.
+    """
+    Include *what*.
 
-    :param what: What to whitelist.
-    :type what: :class:`list` of :class:`type` or :class:`attr.Attribute`\ s
+    :param what: What to include.
+    :type what: `list` of `type` or `attrs.Attribute`\\ s
 
-    :rtype: :class:`callable`
+    :rtype: `callable`
     """
     cls, attrs = _split_what(what)
 
@@ -36,13 +35,13 @@
 
 
 def exclude(*what):
-    r"""
-    Blacklist *what*.
+    """
+    Exclude *what*.
 
-    :param what: What to blacklist.
-    :type what: :class:`list` of classes or :class:`attr.Attribute`\ s.
+    :param what: What to exclude.
+    :type what: `list` of classes or `attrs.Attribute`\\ s.
 
-    :rtype: :class:`callable`
+    :rtype: `callable`
     """
     cls, attrs = _split_what(what)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/filters.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,6 @@
+from typing import Any, Union
+
+from . import Attribute, _FilterType
+
+def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ...
+def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ...
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/setters.py	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly used hooks for on_setattr.
+"""
+
+
+from . import _config
+from .exceptions import FrozenAttributeError
+
+
+def pipe(*setters):
+    """
+    Run all *setters* and return the return value of the last one.
+
+    .. versionadded:: 20.1.0
+    """
+
+    def wrapped_pipe(instance, attrib, new_value):
+        rv = new_value
+
+        for setter in setters:
+            rv = setter(instance, attrib, rv)
+
+        return rv
+
+    return wrapped_pipe
+
+
+def frozen(_, __, ___):
+    """
+    Prevent an attribute from being modified.
+
+    .. versionadded:: 20.1.0
+    """
+    raise FrozenAttributeError()
+
+
+def validate(instance, attrib, new_value):
+    """
+    Run *attrib*'s validator on *new_value* if it has one.
+
+    .. versionadded:: 20.1.0
+    """
+    if _config._run_validators is False:
+        return new_value
+
+    v = attrib.validator
+    if not v:
+        return new_value
+
+    v(instance, attrib, new_value)
+
+    return new_value
+
+
+def convert(instance, attrib, new_value):
+    """
+    Run *attrib*'s converter -- if it has one -- on *new_value* and return the
+    result.
+
+    .. versionadded:: 20.1.0
+    """
+    c = attrib.converter
+    if c:
+        return c(new_value)
+
+    return new_value
+
+
+# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
+# autodata stopped working, so the docstring is inlined in the API docs.
+NO_OP = object()
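
A sketch chaining these hooks with `setters.pipe` (the ``Knob`` class is invented):

    import attr
    from attr import setters, validators

    @attr.s(on_setattr=setters.pipe(setters.convert, setters.validate))
    class Knob:
        level = attr.ib(converter=int, validator=validators.instance_of(int))

    k = Knob("3")   # the converter also runs in __init__
    k.level = "7"   # on assignment: convert -> 7, then validate -> OK
    assert k.level == 7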
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/setters.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,19 @@
+from typing import Any, NewType, NoReturn, TypeVar, cast
+
+from . import Attribute, _OnSetAttrType
+
+_T = TypeVar("_T")
+
+def frozen(
+    instance: Any, attribute: Attribute[Any], new_value: Any
+) -> NoReturn: ...
+def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ...
+def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ...
+
+# convert is allowed to return Any, because converters can be chained using pipe.
+def convert(
+    instance: Any, attribute: Attribute[Any], new_value: Any
+) -> Any: ...
+
+_NoOpType = NewType("_NoOpType", object)
+NO_OP: _NoOpType
--- a/mercurial/thirdparty/attr/validators.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/thirdparty/attr/validators.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,24 +1,99 @@
+# SPDX-License-Identifier: MIT
+
 """
 Commonly useful validators.
 """
 
-from __future__ import absolute_import, division, print_function
+
+import operator
+import re
+
+from contextlib import contextmanager
 
-from ._make import attr, attributes, and_, _AndValidator
+from ._config import get_run_validators, set_run_validators
+from ._make import _AndValidator, and_, attrib, attrs
+from .exceptions import NotCallableError
+
+
+try:
+    Pattern = re.Pattern
+except AttributeError:  # Python <3.7 lacks a Pattern type.
+    Pattern = type(re.compile(""))
 
 
 __all__ = [
     "and_",
+    "deep_iterable",
+    "deep_mapping",
+    "disabled",
+    "ge",
+    "get_disabled",
+    "gt",
     "in_",
     "instance_of",
+    "is_callable",
+    "le",
+    "lt",
+    "matches_re",
+    "max_len",
+    "min_len",
     "optional",
     "provides",
+    "set_disabled",
 ]
 
 
-@attributes(repr=False, slots=True, hash=True)
-class _InstanceOfValidator(object):
-    type = attr()
+def set_disabled(disabled):
+    """
+    Globally disable or enable running validators.
+
+    By default, they are run.
+
+    :param disabled: If ``True``, disable running all validators.
+    :type disabled: bool
+
+    .. warning::
+
+        This function is not thread-safe!
+
+    .. versionadded:: 21.3.0
+    """
+    set_run_validators(not disabled)
+
+
+def get_disabled():
+    """
+    Return a bool indicating whether validators are currently disabled or not.
+
+    :return: ``True`` if validators are currently disabled.
+    :rtype: bool
+
+    .. versionadded:: 21.3.0
+    """
+    return not get_run_validators()
+
+
+@contextmanager
+def disabled():
+    """
+    Context manager that disables running validators within its context.
+
+    .. warning::
+
+        This context manager is not thread-safe!
+
+    .. versionadded:: 21.3.0
+    """
+    set_run_validators(False)
+    try:
+        yield
+    finally:
+        set_run_validators(True)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator:
+    type = attrib()
 
     def __call__(self, inst, attr, value):
         """
@@ -27,38 +102,116 @@
         if not isinstance(value, self.type):
             raise TypeError(
                 "'{name}' must be {type!r} (got {value!r} that is a "
-                "{actual!r})."
-                .format(name=attr.name, type=self.type,
-                        actual=value.__class__, value=value),
-                attr, self.type, value,
+                "{actual!r}).".format(
+                    name=attr.name,
+                    type=self.type,
+                    actual=value.__class__,
+                    value=value,
+                ),
+                attr,
+                self.type,
+                value,
             )
 
     def __repr__(self):
-        return (
-            "<instance_of validator for type {type!r}>"
-            .format(type=self.type)
+        return "<instance_of validator for type {type!r}>".format(
+            type=self.type
         )
 
 
 def instance_of(type):
     """
-    A validator that raises a :exc:`TypeError` if the initializer is called
-    with a wrong type for this particular attribute (checks are perfomed using
-    :func:`isinstance` therefore it's also valid to pass a tuple of types).
+    A validator that raises a `TypeError` if the initializer is called
+    with a wrong type for this particular attribute (checks are performed using
+    `isinstance`, so it's also valid to pass a tuple of types).
 
     :param type: The type to check for.
     :type type: type or tuple of types
 
     :raises TypeError: With a human readable error message, the attribute
-        (of type :class:`attr.Attribute`), the expected type, and the value it
+        (of type `attrs.Attribute`), the expected type, and the value it
         got.
     """
     return _InstanceOfValidator(type)
 
 
-@attributes(repr=False, slots=True, hash=True)
-class _ProvidesValidator(object):
-    interface = attr()
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator:
+    pattern = attrib()
+    match_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.match_func(value):
+            raise ValueError(
+                "'{name}' must match regex {pattern!r}"
+                " ({value!r} doesn't)".format(
+                    name=attr.name, pattern=self.pattern.pattern, value=value
+                ),
+                attr,
+                self.pattern,
+                value,
+            )
+
+    def __repr__(self):
+        return "<matches_re validator for pattern {pattern!r}>".format(
+            pattern=self.pattern
+        )
+
+
+def matches_re(regex, flags=0, func=None):
+    r"""
+    A validator that raises `ValueError` if the initializer is called
+    with a string that doesn't match *regex*.
+
+    :param regex: a regex string or precompiled pattern to match against
+    :param int flags: flags that will be passed to the underlying re function
+        (default 0)
+    :param callable func: which underlying `re` function to call. Valid options
+        are `re.fullmatch`, `re.search`, and `re.match`; the default ``None``
+        means `re.fullmatch`. For performance reasons, the pattern is always
+        precompiled using `re.compile`.
+
+    .. versionadded:: 19.2.0
+    .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
+    """
+    valid_funcs = (re.fullmatch, None, re.search, re.match)
+    if func not in valid_funcs:
+        raise ValueError(
+            "'func' must be one of {}.".format(
+                ", ".join(
+                    sorted(
+                        e and e.__name__ or "None" for e in set(valid_funcs)
+                    )
+                )
+            )
+        )
+
+    if isinstance(regex, Pattern):
+        if flags:
+            raise TypeError(
+                "'flags' can only be used with a string pattern; "
+                "pass flags to re.compile() instead"
+            )
+        pattern = regex
+    else:
+        pattern = re.compile(regex, flags)
+
+    if func is re.match:
+        match_func = pattern.match
+    elif func is re.search:
+        match_func = pattern.search
+    else:
+        match_func = pattern.fullmatch
+
+    return _MatchesReValidator(pattern, match_func)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _ProvidesValidator:
+    interface = attrib()
 
     def __call__(self, inst, attr, value):
         """
@@ -67,37 +220,40 @@
         if not self.interface.providedBy(value):
             raise TypeError(
                 "'{name}' must provide {interface!r} which {value!r} "
-                "doesn't."
-                .format(name=attr.name, interface=self.interface, value=value),
-                attr, self.interface, value,
+                "doesn't.".format(
+                    name=attr.name, interface=self.interface, value=value
+                ),
+                attr,
+                self.interface,
+                value,
             )
 
     def __repr__(self):
-        return (
-            "<provides validator for interface {interface!r}>"
-            .format(interface=self.interface)
+        return "<provides validator for interface {interface!r}>".format(
+            interface=self.interface
         )
 
 
 def provides(interface):
     """
-    A validator that raises a :exc:`TypeError` if the initializer is called
+    A validator that raises a `TypeError` if the initializer is called
     with an object that does not provide the requested *interface* (checks are
     performed using ``interface.providedBy(value)`` (see `zope.interface
     <https://zopeinterface.readthedocs.io/en/latest/>`_).
 
-    :param zope.interface.Interface interface: The interface to check for.
+    :param interface: The interface to check for.
+    :type interface: ``zope.interface.Interface``
 
     :raises TypeError: With a human readable error message, the attribute
-        (of type :class:`attr.Attribute`), the expected interface, and the
+        (of type `attrs.Attribute`), the expected interface, and the
         value it got.
     """
     return _ProvidesValidator(interface)
 
 
-@attributes(repr=False, slots=True, hash=True)
-class _OptionalValidator(object):
-    validator = attr()
+@attrs(repr=False, slots=True, hash=True)
+class _OptionalValidator:
+    validator = attrib()
 
     def __call__(self, inst, attr, value):
         if value is None:
@@ -106,9 +262,8 @@
         self.validator(inst, attr, value)
 
     def __repr__(self):
-        return (
-            "<optional validator for {what} or None>"
-            .format(what=repr(self.validator))
+        return "<optional validator for {what} or None>".format(
+            what=repr(self.validator)
         )
 
 
@@ -120,7 +275,7 @@
 
     :param validator: A validator (or a list of validators) that is used for
         non-``None`` values.
-    :type validator: callable or :class:`list` of callables.
+    :type validator: callable or `list` of callables.
 
     .. versionadded:: 15.1.0
     .. versionchanged:: 17.1.0 *validator* can be a list of validators.
@@ -130,37 +285,310 @@
     return _OptionalValidator(validator)
 
 
-@attributes(repr=False, slots=True, hash=True)
-class _InValidator(object):
-    options = attr()
+@attrs(repr=False, slots=True, hash=True)
+class _InValidator:
+    options = attrib()
 
     def __call__(self, inst, attr, value):
-        if value not in self.options:
+        try:
+            in_options = value in self.options
+        except TypeError:  # e.g. `1 in "abc"`
+            in_options = False
+
+        if not in_options:
             raise ValueError(
-                "'{name}' must be in {options!r} (got {value!r})"
-                .format(name=attr.name, options=self.options, value=value)
+                "'{name}' must be in {options!r} (got {value!r})".format(
+                    name=attr.name, options=self.options, value=value
+                ),
+                attr,
+                self.options,
+                value,
             )
 
     def __repr__(self):
-        return (
-            "<in_ validator with options {options!r}>"
-            .format(options=self.options)
+        return "<in_ validator with options {options!r}>".format(
+            options=self.options
         )
 
 
 def in_(options):
     """
-    A validator that raises a :exc:`ValueError` if the initializer is called
+    A validator that raises a `ValueError` if the initializer is called
     with a value that does not belong in the options provided.  The check is
     performed using ``value in options``.
 
     :param options: Allowed options.
-    :type options: list, tuple, :class:`enum.Enum`, ...
+    :type options: list, tuple, `enum.Enum`, ...
 
     :raises ValueError: With a human readable error message, the attribute (of
-       type :class:`attr.Attribute`), the expected options, and the value it
+       type `attrs.Attribute`), the expected options, and the value it
        got.
 
     .. versionadded:: 17.1.0
+    .. versionchanged:: 22.1.0
+       The ValueError was incomplete until now and only contained the human
+       readable error message. Now it contains all the information that has
+       been promised since 17.1.0.
     """
     return _InValidator(options)
+
+
+@attrs(repr=False, slots=False, hash=True)
+class _IsCallableValidator:
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not callable(value):
+            message = (
+                "'{name}' must be callable "
+                "(got {value!r} that is a {actual!r})."
+            )
+            raise NotCallableError(
+                msg=message.format(
+                    name=attr.name, value=value, actual=value.__class__
+                ),
+                value=value,
+            )
+
+    def __repr__(self):
+        return "<is_callable validator>"
+
+
+def is_callable():
+    """
+    A validator that raises a `attr.exceptions.NotCallableError` if the
+    initializer is called with a value for this particular attribute
+    that is not callable.
+
+    .. versionadded:: 19.1.0
+
+    :raises `attr.exceptions.NotCallableError`: With a human readable error
+        message containing the attribute (`attrs.Attribute`) name,
+        and the value it got.
+    """
+    return _IsCallableValidator()
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepIterable:
+    member_validator = attrib(validator=is_callable())
+    iterable_validator = attrib(
+        default=None, validator=optional(is_callable())
+    )
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.iterable_validator is not None:
+            self.iterable_validator(inst, attr, value)
+
+        for member in value:
+            self.member_validator(inst, attr, member)
+
+    def __repr__(self):
+        iterable_identifier = (
+            ""
+            if self.iterable_validator is None
+            else " {iterable!r}".format(iterable=self.iterable_validator)
+        )
+        return (
+            "<deep_iterable validator for{iterable_identifier}"
+            " iterables of {member!r}>"
+        ).format(
+            iterable_identifier=iterable_identifier,
+            member=self.member_validator,
+        )
+
+
+def deep_iterable(member_validator, iterable_validator=None):
+    """
+    A validator that performs deep validation of an iterable.
+
+    :param member_validator: Validator(s) to apply to iterable members
+    :param iterable_validator: Validator to apply to iterable itself
+        (optional)
+
+    .. versionadded:: 19.1.0
+
+    :raises TypeError: if any sub-validators fail
+    """
+    if isinstance(member_validator, (list, tuple)):
+        member_validator = and_(*member_validator)
+    return _DeepIterable(member_validator, iterable_validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepMapping:
+    key_validator = attrib(validator=is_callable())
+    value_validator = attrib(validator=is_callable())
+    mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.mapping_validator is not None:
+            self.mapping_validator(inst, attr, value)
+
+        for key in value:
+            self.key_validator(inst, attr, key)
+            self.value_validator(inst, attr, value[key])
+
+    def __repr__(self):
+        return (
+            "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
+        ).format(key=self.key_validator, value=self.value_validator)
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+    """
+    A validator that performs deep validation of a dictionary.
+
+    :param key_validator: Validator to apply to dictionary keys
+    :param value_validator: Validator to apply to dictionary values
+    :param mapping_validator: Validator to apply to top-level mapping
+        attribute (optional)
+
+    .. versionadded:: 19.1.0
+
+    :raises TypeError: if any sub-validators fail
+    """
+    return _DeepMapping(key_validator, value_validator, mapping_validator)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _NumberValidator:
+    bound = attrib()
+    compare_op = attrib()
+    compare_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.compare_func(value, self.bound):
+            raise ValueError(
+                "'{name}' must be {op} {bound}: {value}".format(
+                    name=attr.name,
+                    op=self.compare_op,
+                    bound=self.bound,
+                    value=value,
+                )
+            )
+
+    def __repr__(self):
+        return "<Validator for x {op} {bound}>".format(
+            op=self.compare_op, bound=self.bound
+        )
+
+
+def lt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number larger than or equal to *val*.
+
+    :param val: Exclusive upper bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<", operator.lt)
+
+
+def le(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number greater than *val*.
+
+    :param val: Inclusive upper bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<=", operator.le)
+
+
+def ge(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number smaller than *val*.
+
+    :param val: Inclusive lower bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">=", operator.ge)
+
+
+def gt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a number smaller than or equal to *val*.
+
+    :param val: Exclusive lower bound for values
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">", operator.gt)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MaxLengthValidator:
+    max_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) > self.max_length:
+            raise ValueError(
+                "Length of '{name}' must be <= {max}: {len}".format(
+                    name=attr.name, max=self.max_length, len=len(value)
+                )
+            )
+
+    def __repr__(self):
+        return "<max_len validator for {max}>".format(max=self.max_length)
+
+
+def max_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is longer than *length*.
+
+    :param int length: Maximum length of the string or iterable
+
+    .. versionadded:: 21.3.0
+    """
+    return _MaxLengthValidator(length)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MinLengthValidator:
+    min_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) < self.min_length:
+            raise ValueError(
+                "Length of '{name}' must be => {min}: {len}".format(
+                    name=attr.name, min=self.min_length, len=len(value)
+                )
+            )
+
+    def __repr__(self):
+        return "<min_len validator for {min}>".format(min=self.min_length)
+
+
+def min_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is shorter than *length*.
+
+    :param int length: Minimum length of the string or iterable
+
+    .. versionadded:: 22.1.0
+    """
+    return _MinLengthValidator(length)
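
A sketch exercising several of these validators in one class (names invented); passing a list to ``validator=`` wraps it with `and_` automatically:

    import attr
    from attr import validators as v

    @attr.s
    class Job:
        name = attr.ib(validator=[v.instance_of(str), v.matches_re(r"[a-z]+")])
        priority = attr.ib(validator=[v.ge(0), v.le(9)])
        tags = attr.ib(
            validator=v.deep_iterable(
                member_validator=v.instance_of(str),
                iterable_validator=v.instance_of(list),
            )
        )

    Job("build", 5, ["ci"])      # passes all validators
    try:
        Job("Build", 5, ["ci"])  # matches_re defaults to re.fullmatch
    except ValueError:
        pass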
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/attr/validators.pyi	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,80 @@
+from typing import (
+    Any,
+    AnyStr,
+    Callable,
+    Container,
+    ContextManager,
+    Iterable,
+    List,
+    Mapping,
+    Match,
+    Optional,
+    Pattern,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    overload,
+)
+
+from . import _ValidatorType
+from . import _ValidatorArgType
+
+_T = TypeVar("_T")
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T3 = TypeVar("_T3")
+_I = TypeVar("_I", bound=Iterable)
+_K = TypeVar("_K")
+_V = TypeVar("_V")
+_M = TypeVar("_M", bound=Mapping)
+
+def set_disabled(run: bool) -> None: ...
+def get_disabled() -> bool: ...
+def disabled() -> ContextManager[None]: ...
+
+# To be more precise on instance_of use some overloads.
+# If there are more than 3 items in the tuple then we fall back to Any
+@overload
+def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(
+    type: Tuple[Type[_T1], Type[_T2]]
+) -> _ValidatorType[Union[_T1, _T2]]: ...
+@overload
+def instance_of(
+    type: Tuple[Type[_T1], Type[_T2], Type[_T3]]
+) -> _ValidatorType[Union[_T1, _T2, _T3]]: ...
+@overload
+def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ...
+def provides(interface: Any) -> _ValidatorType[Any]: ...
+def optional(
+    validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]]
+) -> _ValidatorType[Optional[_T]]: ...
+def in_(options: Container[_T]) -> _ValidatorType[_T]: ...
+def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ...
+def matches_re(
+    regex: Union[Pattern[AnyStr], AnyStr],
+    flags: int = ...,
+    func: Optional[
+        Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]]
+    ] = ...,
+) -> _ValidatorType[AnyStr]: ...
+def deep_iterable(
+    member_validator: _ValidatorArgType[_T],
+    iterable_validator: Optional[_ValidatorType[_I]] = ...,
+) -> _ValidatorType[_I]: ...
+def deep_mapping(
+    key_validator: _ValidatorType[_K],
+    value_validator: _ValidatorType[_V],
+    mapping_validator: Optional[_ValidatorType[_M]] = ...,
+) -> _ValidatorType[_M]: ...
+def is_callable() -> _ValidatorType[_T]: ...
+def lt(val: _T) -> _ValidatorType[_T]: ...
+def le(val: _T) -> _ValidatorType[_T]: ...
+def ge(val: _T) -> _ValidatorType[_T]: ...
+def gt(val: _T) -> _ValidatorType[_T]: ...
+def max_len(length: int) -> _ValidatorType[_T]: ...
+def min_len(length: int) -> _ValidatorType[_T]: ...
--- a/mercurial/transaction.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/transaction.py	Thu Mar 02 22:45:44 2023 +0100
@@ -668,49 +668,83 @@
         self._file.close()
         self._backupsfile.close()
 
+        quick = self._can_quick_abort(entries)
         try:
-            if not entries and not self._backupentries:
-                if self._backupjournal:
-                    self._opener.unlink(self._backupjournal)
-                if self._journal:
-                    self._opener.unlink(self._journal)
-                return
-
-            self._report(_(b"transaction abort!\n"))
-
-            try:
-                for cat in sorted(self._abortcallback):
-                    self._abortcallback[cat](self)
-                # Prevent double usage and help clear cycles.
-                self._abortcallback = None
-                _playback(
-                    self._journal,
-                    self._report,
-                    self._opener,
-                    self._vfsmap,
-                    entries,
-                    self._backupentries,
-                    False,
-                    checkambigfiles=self._checkambigfiles,
-                )
-                self._report(_(b"rollback completed\n"))
-            except BaseException as exc:
-                self._report(_(b"rollback failed - please run hg recover\n"))
-                self._report(
-                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
-                )
+            if not quick:
+                self._report(_(b"transaction abort!\n"))
+            for cat in sorted(self._abortcallback):
+                self._abortcallback[cat](self)
+            # Prevent double usage and help clear cycles.
+            self._abortcallback = None
+            if quick:
+                self._do_quick_abort(entries)
+            else:
+                self._do_full_abort(entries)
         finally:
             self._journal = None
             self._releasefn(self, False)  # notify failure of transaction
             self._releasefn = None  # Help prevent cycles.
 
+    def _can_quick_abort(self, entries):
+        """False if any semantic content have been written on disk
+
+        True if nothing, except temporary files has been writen on disk."""
+        if entries:
+            return False
+        for e in self._backupentries:
+            if e[1]:
+                return False
+        return True
+
+    def _do_quick_abort(self, entries):
+        """(Silently) do a quick cleanup (see _can_quick_abort)"""
+        assert self._can_quick_abort(entries)
+        tmp_files = [e for e in self._backupentries if not e[1]]
+        for vfs_id, old_path, tmp_path, xxx in tmp_files:
+            vfs = self._vfsmap[vfs_id]
+            try:
+                vfs.unlink(tmp_path)
+            except FileNotFoundError:
+                pass
+        if self._backupjournal:
+            self._opener.unlink(self._backupjournal)
+        if self._journal:
+            self._opener.unlink(self._journal)
+
+    def _do_full_abort(self, entries):
+        """(Noisily) rollback all the change introduced by the transaction"""
+        try:
+            _playback(
+                self._journal,
+                self._report,
+                self._opener,
+                self._vfsmap,
+                entries,
+                self._backupentries,
+                False,
+                checkambigfiles=self._checkambigfiles,
+            )
+            self._report(_(b"rollback completed\n"))
+        except BaseException as exc:
+            self._report(_(b"rollback failed - please run hg recover\n"))
+            self._report(
+                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
+            )
+
 
 BAD_VERSION_MSG = _(
     b"journal was created by a different version of Mercurial\n"
 )
 
 
-def rollback(opener, vfsmap, file, report, checkambigfiles=None):
+def rollback(
+    opener,
+    vfsmap,
+    file,
+    report,
+    checkambigfiles=None,
+    skip_journal_pattern=None,
+):
     """Rolls back the transaction contained in the given file
 
     Reads the entries in the specified file, and the corresponding
@@ -755,6 +789,9 @@
                         line = line[:-1]
                         l, f, b, c = line.split(b'\0')
                         backupentries.append((l, f, b, bool(c)))
+    if skip_journal_pattern is not None:
+        keep = lambda x: not skip_journal_pattern.match(x[1])
+        backupentries = [x for x in backupentries if keep(x)]
 
     _playback(
         file,
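
A minimal sketch of what the new *skip_journal_pattern* filtering does before playback; the pattern and entries below are made up, real ones come from the backup journal:

    import re

    skip_journal_pattern = re.compile(b'^cache/')
    backupentries = [
        (b'', b'cache/tags2', b'journal.backup.tags2', False),
        (b'', b'00changelog.i', b'journal.backup.00changelog.i', False),
    ]
    keep = lambda x: not skip_journal_pattern.match(x[1])
    backupentries = [x for x in backupentries if keep(x)]
    # Only the non-matching entry is left for _playback().
    assert backupentries == [
        (b'', b'00changelog.i', b'journal.backup.00changelog.i', False)
    ]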
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/typelib.py	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,28 @@
+# typelib.py - type hint aliases and support
+#
+# Copyright 2022 Matt Harbison <matt_harbison@yahoo.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import typing
+
+# Note: this is slightly different from pycompat.TYPE_CHECKING, as using
+# pycompat causes the BinaryIO_Proxy type to be resolved to ``object`` when
+# used as the base class during a pytype run.
+TYPE_CHECKING = typing.TYPE_CHECKING
+
+
+# The BinaryIO class provides empty methods, which at runtime means that
+# ``__getattr__`` on the proxy classes won't get called for the methods that
+# should delegate to the internal object.  So to avoid runtime changes because
+# of the required typing inheritance, just use BinaryIO when typechecking, and
+# ``object`` otherwise.
+if TYPE_CHECKING:
+    from typing import (
+        BinaryIO,
+    )
+
+    BinaryIO_Proxy = BinaryIO
+else:
+    BinaryIO_Proxy = object
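
The intended usage looks roughly like this (``FileProxy`` is a hypothetical example, not a class from this changeset):

    from mercurial.typelib import BinaryIO_Proxy

    class FileProxy(BinaryIO_Proxy):
        # Typechecks as BinaryIO; at runtime the base is plain object,
        # so __getattr__ still forwards to the wrapped file object.
        def __init__(self, fh):
            self._fh = fh

        def __getattr__(self, name):
            return getattr(self._fh, name)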
--- a/mercurial/ui.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/ui.py	Thu Mar 02 22:45:44 2023 +0100
@@ -19,6 +19,21 @@
 import sys
 import traceback
 
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    NoReturn,
+    Optional,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+    overload,
+)
+
 from .i18n import _
 from .node import hex
 from .pycompat import (
@@ -48,15 +63,23 @@
     urlutil,
 )
 
+_ConfigItems = Dict[Tuple[bytes, bytes], object]  # {(section, name) : value}
+# The **opts args of the various write() methods can be basically anything, but
+# there's no way to express it as "anything but str".  So type it to be the
+# handful of known types that are used.
+_MsgOpts = Union[bytes, bool, List["_PromptChoice"]]
+_PromptChoice = Tuple[bytes, bytes]
+_Tui = TypeVar('_Tui', bound="ui")
+
 urlreq = util.urlreq
 
 # for use with str.translate(None, _keepalnum), to keep just alphanumerics
-_keepalnum = b''.join(
+_keepalnum: bytes = b''.join(
     c for c in map(pycompat.bytechr, range(256)) if not c.isalnum()
 )
 
 # The config knobs that will be altered (if unset) by ui.tweakdefaults.
-tweakrc = b"""
+tweakrc: bytes = b"""
 [ui]
 # The rollback command is dangerous. As a rule, don't use it.
 rollback = False
@@ -83,7 +106,7 @@
 word-diff = 1
 """
 
-samplehgrcs = {
+samplehgrcs: Dict[bytes, bytes] = {
     b'user': b"""# example user config (see 'hg help config' for more info)
 [ui]
 # name and email, e.g.
@@ -172,7 +195,7 @@
 class httppasswordmgrdbproxy:
     """Delays loading urllib2 until it's needed."""
 
-    def __init__(self):
+    def __init__(self) -> None:
         self._mgr = None
 
     def _get_mgr(self):
@@ -195,7 +218,7 @@
         )
 
 
-def _catchterm(*args):
+def _catchterm(*args) -> NoReturn:
     raise error.SignalInterrupt
 
 
@@ -204,11 +227,11 @@
 _unset = object()
 
 # _reqexithandlers: callbacks run at the end of a request
-_reqexithandlers = []
+_reqexithandlers: List = []
 
 
 class ui:
-    def __init__(self, src=None):
+    def __init__(self, src: Optional["ui"] = None) -> None:
         """Create a fresh new ui object if no src given
 
         Use uimod.ui.load() to create a ui which knows global and user configs.
@@ -303,13 +326,13 @@
                 if k in self.environ:
                     self._exportableenviron[k] = self.environ[k]
 
-    def _new_source(self):
+    def _new_source(self) -> None:
         self._ocfg.new_source()
         self._tcfg.new_source()
         self._ucfg.new_source()
 
     @classmethod
-    def load(cls):
+    def load(cls: Type[_Tui]) -> _Tui:
         """Create a ui and load global and user configs"""
         u = cls()
         # we always trust global config files and environment variables
@@ -335,7 +358,7 @@
         u._new_source()  # anything after that is a different level
         return u
 
-    def _maybetweakdefaults(self):
+    def _maybetweakdefaults(self) -> None:
         if not self.configbool(b'ui', b'tweakdefaults'):
             return
         if self._tweaked or self.plain(b'tweakdefaults'):
@@ -355,17 +378,17 @@
                 if not self.hasconfig(section, name):
                     self.setconfig(section, name, value, b"<tweakdefaults>")
 
-    def copy(self):
+    def copy(self: _Tui) -> _Tui:
         return self.__class__(self)
 
-    def resetstate(self):
+    def resetstate(self) -> None:
         """Clear internal state that shouldn't persist across commands"""
         if self._progbar:
             self._progbar.resetstate()  # reset last-print time of progress bar
         self.httppasswordmgrdb = httppasswordmgrdbproxy()
 
     @contextlib.contextmanager
-    def timeblockedsection(self, key):
+    def timeblockedsection(self, key: bytes):
         # this is open-coded below - search for timeblockedsection to find them
         starttime = util.timer()
         try:
@@ -410,10 +433,10 @@
             finally:
                 self._uninterruptible = False
 
-    def formatter(self, topic, opts):
+    def formatter(self, topic: bytes, opts):
         return formatter.formatter(self, self, topic, opts)
 
-    def _trusted(self, fp, f):
+    def _trusted(self, fp, f: bytes) -> bool:
         st = util.fstat(fp)
         if util.isowner(st):
             return True
@@ -439,7 +462,7 @@
 
     def read_resource_config(
         self, name, root=None, trust=False, sections=None, remap=None
-    ):
+    ) -> None:
         try:
             fp = resourceutil.open_resource(name[0], name[1])
         except IOError:
@@ -453,7 +476,7 @@
 
     def readconfig(
         self, filename, root=None, trust=False, sections=None, remap=None
-    ):
+    ) -> None:
         try:
             fp = open(filename, 'rb')
         except IOError:
@@ -465,7 +488,7 @@
 
     def _readconfig(
         self, filename, fp, root=None, trust=False, sections=None, remap=None
-    ):
+    ) -> None:
         with fp:
             cfg = config.config()
             trusted = sections or trust or self._trusted(fp, filename)
@@ -481,7 +504,9 @@
 
         self._applyconfig(cfg, trusted, root)
 
-    def applyconfig(self, configitems, source=b"", root=None):
+    def applyconfig(
+        self, configitems: _ConfigItems, source=b"", root=None
+    ) -> None:
         """Add configitems from a non-file source.  Unlike with ``setconfig()``,
         they can be overridden by subsequent config file reads.  The items are
         in the same format as ``configoverride()``, namely a dict of the
@@ -497,7 +522,7 @@
 
         self._applyconfig(cfg, True, root)
 
-    def _applyconfig(self, cfg, trusted, root):
+    def _applyconfig(self, cfg, trusted, root) -> None:
         if self.plain():
             for k in (
                 b'debug',
@@ -540,7 +565,7 @@
             root = os.path.expanduser(b'~')
         self.fixconfig(root=root)
 
-    def fixconfig(self, root=None, section=None):
+    def fixconfig(self, root=None, section=None) -> None:
         if section in (None, b'paths'):
             # expand vars and ~
             # translate paths relative to root (or home) into absolute paths
@@ -603,12 +628,12 @@
             self._ucfg.backup(section, item),
         )
 
-    def restoreconfig(self, data):
+    def restoreconfig(self, data) -> None:
         self._ocfg.restore(data[0])
         self._tcfg.restore(data[1])
         self._ucfg.restore(data[2])
 
-    def setconfig(self, section, name, value, source=b''):
+    def setconfig(self, section, name, value, source=b'') -> None:
         for cfg in (self._ocfg, self._tcfg, self._ucfg):
             cfg.set(section, name, value, source)
         self.fixconfig(section=section)
@@ -994,7 +1019,7 @@
             for name, value in self.configitems(section, untrusted):
                 yield section, name, value
 
-    def plain(self, feature=None):
+    def plain(self, feature: Optional[bytes] = None) -> bool:
         """is plain mode active?
 
         Plain mode means that all configuration variables which affect
@@ -1068,46 +1093,16 @@
             )
         return user
 
-    def shortuser(self, user):
+    def shortuser(self, user: bytes) -> bytes:
         """Return a short representation of a user name or email address."""
         if not self.verbose:
             user = stringutil.shortuser(user)
         return user
 
-    def expandpath(self, loc, default=None):
-        """Return repository location relative to cwd or from [paths]"""
-        msg = b'ui.expandpath is deprecated, use `get_*` functions from urlutil'
-        self.deprecwarn(msg, b'6.0')
-        try:
-            p = self.getpath(loc)
-            if p:
-                return p.rawloc
-        except error.RepoError:
-            pass
-
-        if default:
-            try:
-                p = self.getpath(default)
-                if p:
-                    return p.rawloc
-            except error.RepoError:
-                pass
-
-        return loc
-
     @util.propertycache
     def paths(self):
         return urlutil.paths(self)
 
-    def getpath(self, *args, **kwargs):
-        """see paths.getpath for details
-
-        This method exist as `getpath` need a ui for potential warning message.
-        """
-        msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
-        self.deprecwarn(msg, b'6.0')
-        return self.paths.getpath(self, *args, **kwargs)
-
     @property
     def fout(self):
         return self._fout
@@ -1146,14 +1141,18 @@
         self._fmsgout, self._fmsgerr = _selectmsgdests(self)
 
     @contextlib.contextmanager
-    def silent(self, error=False, subproc=False, labeled=False):
+    def silent(
+        self, error: bool = False, subproc: bool = False, labeled: bool = False
+    ):
         self.pushbuffer(error=error, subproc=subproc, labeled=labeled)
         try:
             yield
         finally:
             self.popbuffer()
 
-    def pushbuffer(self, error=False, subproc=False, labeled=False):
+    def pushbuffer(
+        self, error: bool = False, subproc: bool = False, labeled: bool = False
+    ) -> None:
         """install a buffer to capture standard output of the ui object
 
         If error is True, the error output will be captured too.
@@ -1172,7 +1171,7 @@
         self._bufferstates.append((error, subproc, labeled))
         self._bufferapplylabels = labeled
 
-    def popbuffer(self):
+    def popbuffer(self) -> bytes:
         '''pop the last buffer and return the buffered output'''
         self._bufferstates.pop()
         if self._bufferstates:
@@ -1182,25 +1181,25 @@
 
         return b"".join(self._buffers.pop())
 
-    def _isbuffered(self, dest):
+    def _isbuffered(self, dest) -> bool:
         if dest is self._fout:
             return bool(self._buffers)
         if dest is self._ferr:
             return bool(self._bufferstates and self._bufferstates[-1][0])
         return False
 
-    def canwritewithoutlabels(self):
+    def canwritewithoutlabels(self) -> bool:
         '''check if write skips the label'''
         if self._buffers and not self._bufferapplylabels:
             return True
         return self._colormode is None
 
-    def canbatchlabeledwrites(self):
+    def canbatchlabeledwrites(self) -> bool:
         '''check if write calls with labels are batchable'''
         # Windows color printing is special, see ``write``.
         return self._colormode != b'win32'
 
-    def write(self, *args, **opts):
+    def write(self, *args: bytes, **opts: _MsgOpts) -> None:
         """write args to output
 
         By default, this method simply writes to the buffer or stdout.
@@ -1258,10 +1257,10 @@
                 util.timer() - starttime
             ) * 1000
 
-    def write_err(self, *args, **opts):
+    def write_err(self, *args: bytes, **opts: _MsgOpts) -> None:
         self._write(self._ferr, *args, **opts)
 
-    def _write(self, dest, *args, **opts):
+    def _write(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
         # update write() as well if you touch this code
         if self._isbuffered(dest):
             label = opts.get('label', b'')
@@ -1272,7 +1271,7 @@
         else:
             self._writenobuf(dest, *args, **opts)
 
-    def _writenobuf(self, dest, *args, **opts):
+    def _writenobuf(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
         # update write() as well if you touch this code
         if not opts.get('keepprogressbar', False):
             self._progclear()
@@ -1314,7 +1313,7 @@
                 util.timer() - starttime
             ) * 1000
 
-    def _writemsg(self, dest, *args, **opts):
+    def _writemsg(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
         timestamp = self.showtimestamp and opts.get('type') in {
             b'debug',
             b'error',
@@ -1331,10 +1330,10 @@
         if timestamp:
             dest.flush()
 
-    def _writemsgnobuf(self, dest, *args, **opts):
+    def _writemsgnobuf(self, dest, *args: bytes, **opts: _MsgOpts) -> None:
         _writemsgwith(self._writenobuf, dest, *args, **opts)
 
-    def flush(self):
+    def flush(self) -> None:
         # opencode timeblockedsection because this is a critical path
         starttime = util.timer()
         try:
@@ -1354,7 +1353,7 @@
                 util.timer() - starttime
             ) * 1000
 
-    def _isatty(self, fh):
+    def _isatty(self, fh) -> bool:
         if self.configbool(b'ui', b'nontty'):
             return False
         return procutil.isatty(fh)
@@ -1392,10 +1391,10 @@
         finally:
             self.restorefinout(fin, fout)
 
-    def disablepager(self):
+    def disablepager(self) -> None:
         self._disablepager = True
 
-    def pager(self, command):
+    def pager(self, command: bytes) -> None:
         """Start a pager for subsequent command output.
 
         Commands which produce a long stream of output should call
@@ -1476,7 +1475,7 @@
             # warning about a missing pager command.
             self.disablepager()
 
-    def _runpager(self, command, env=None):
+    def _runpager(self, command: bytes, env=None) -> bool:
         """Actually start the pager and set up file descriptors.
 
         This is separate in part so that extensions (like chg) can
@@ -1556,7 +1555,7 @@
         self._exithandlers.append((func, args, kwargs))
         return func
 
-    def interface(self, feature):
+    def interface(self, feature: bytes) -> bytes:
         """what interface to use for interactive console features?
 
         The interface is controlled by the value of `ui.interface` but also by
@@ -1611,12 +1610,12 @@
         defaultinterface = b"text"
         i = self.config(b"ui", b"interface")
         if i in alldefaults:
-            defaultinterface = i
+            defaultinterface = cast(bytes, i)  # cast to help pytype
 
-        choseninterface = defaultinterface
+        choseninterface: bytes = defaultinterface
         f = self.config(b"ui", b"interface.%s" % feature)
         if f in availableinterfaces:
-            choseninterface = f
+            choseninterface = cast(bytes, f)  # cast to help pytype
 
         if i is not None and defaultinterface != i:
             if f is not None:
@@ -1656,7 +1655,7 @@
 
         return i
 
-    def termwidth(self):
+    def termwidth(self) -> int:
         """how wide is the terminal in columns?"""
         if b'COLUMNS' in encoding.environ:
             try:
@@ -1693,7 +1692,11 @@
 
         return i
 
-    def _readline(self, prompt=b' ', promptopts=None):
+    def _readline(
+        self,
+        prompt: bytes = b' ',
+        promptopts: Optional[Dict[str, _MsgOpts]] = None,
+    ) -> bytes:
         # Replacing stdin/stdout temporarily is a hard problem on Python 3
         # because they have to be text streams with *no buffering*. Instead,
         # we use rawinput() only if call_readline() will be invoked by
@@ -1748,14 +1751,38 @@
 
         return line
 
+    if pycompat.TYPE_CHECKING:
+
+        @overload
+        def prompt(self, msg: bytes, default: bytes) -> bytes:
+            pass
+
+        @overload
+        def prompt(self, msg: bytes, default: None) -> Optional[bytes]:
+            pass
+
     def prompt(self, msg, default=b"y"):
         """Prompt user with msg, read response.
         If ui is not interactive, the default is returned.
         """
         return self._prompt(msg, default=default)
 
-    def _prompt(self, msg, **opts):
-        default = opts['default']
+    if pycompat.TYPE_CHECKING:
+
+        @overload
+        def _prompt(
+            self, msg: bytes, default: bytes, **opts: _MsgOpts
+        ) -> bytes:
+            pass
+
+        @overload
+        def _prompt(
+            self, msg: bytes, default: None, **opts: _MsgOpts
+        ) -> Optional[bytes]:
+            pass
+
+    def _prompt(self, msg, default=b'y', **opts):
+        opts = {**opts, 'default': default}
         if not self.interactive():
             self._writemsg(self._fmsgout, msg, b' ', type=b'prompt', **opts)
             self._writemsg(
@@ -1775,7 +1802,7 @@
             raise error.ResponseExpected()
 
     @staticmethod
-    def extractchoices(prompt):
+    def extractchoices(prompt: bytes) -> Tuple[bytes, List[_PromptChoice]]:
         """Extract prompt message and list of choices from specified prompt.
 
         This returns tuple "(message, choices)", and "choices" is the
@@ -1795,6 +1822,9 @@
         # choices containing spaces, ASCII, or basically anything
         # except an ampersand followed by a character.
         m = re.match(br'(?s)(.+?)\$\$([^$]*&[^ $].*)', prompt)
+
+        assert m is not None  # help pytype
+
         msg = m.group(1)
         choices = [p.strip(b' ') for p in m.group(2).split(b'$$')]
 
@@ -1804,7 +1834,7 @@
 
         return (msg, [choicetuple(s) for s in choices])
 
-    def promptchoice(self, prompt, default=0):
+    def promptchoice(self, prompt: bytes, default: int = 0) -> int:
         """Prompt user with a message, read response, and ensure it matches
         one of the provided choices. The prompt is formatted as follows:
 
@@ -1824,7 +1854,9 @@
             # TODO: shouldn't it be a warning?
             self._writemsg(self._fmsgout, _(b"unrecognized response\n"))
 
-    def getpass(self, prompt=None, default=None):
+    def getpass(
+        self, prompt: Optional[bytes] = None, default: Optional[bytes] = None
+    ) -> Optional[bytes]:
         if not self.interactive():
             return default
         try:
@@ -1847,7 +1879,7 @@
         except EOFError:
             raise error.ResponseExpected()
 
-    def status(self, *msg, **opts):
+    def status(self, *msg: bytes, **opts: _MsgOpts) -> None:
         """write status message to output (if ui.quiet is False)
 
         This adds an output label of "ui.status".
@@ -1855,21 +1887,21 @@
         if not self.quiet:
             self._writemsg(self._fmsgout, type=b'status', *msg, **opts)
 
-    def warn(self, *msg, **opts):
+    def warn(self, *msg: bytes, **opts: _MsgOpts) -> None:
         """write warning message to output (stderr)
 
         This adds an output label of "ui.warning".
         """
         self._writemsg(self._fmsgerr, type=b'warning', *msg, **opts)
 
-    def error(self, *msg, **opts):
+    def error(self, *msg: bytes, **opts: _MsgOpts) -> None:
         """write error message to output (stderr)
 
         This adds an output label of "ui.error".
         """
         self._writemsg(self._fmsgerr, type=b'error', *msg, **opts)
 
-    def note(self, *msg, **opts):
+    def note(self, *msg: bytes, **opts: _MsgOpts) -> None:
         """write note to output (if ui.verbose is True)
 
         This adds an output label of "ui.note".
@@ -1877,7 +1909,7 @@
         if self.verbose:
             self._writemsg(self._fmsgout, type=b'note', *msg, **opts)
 
-    def debug(self, *msg, **opts):
+    def debug(self, *msg: bytes, **opts: _MsgOpts) -> None:
         """write debug message to output (if ui.debugflag is True)
 
         This adds an output label of "ui.debug".
@@ -1894,14 +1926,14 @@
 
     def edit(
         self,
-        text,
-        user,
-        extra=None,
+        text: bytes,
+        user: bytes,
+        extra: Optional[Dict[bytes, Any]] = None,  # TODO: value type of bytes?
         editform=None,
         pending=None,
-        repopath=None,
-        action=None,
-    ):
+        repopath: Optional[bytes] = None,
+        action: Optional[bytes] = None,
+    ) -> bytes:
         if action is None:
             self.develwarn(
                 b'action is None but will soon be a required '
@@ -1970,13 +2002,13 @@
 
     def system(
         self,
-        cmd,
+        cmd: bytes,
         environ=None,
-        cwd=None,
-        onerr=None,
-        errprefix=None,
-        blockedtag=None,
-    ):
+        cwd: Optional[bytes] = None,
+        onerr: Optional[Callable[[bytes], Exception]] = None,
+        errprefix: Optional[bytes] = None,
+        blockedtag: Optional[bytes] = None,
+    ) -> int:
         """execute shell command with appropriate output stream. command
         output will be redirected if fout is not stdout.
 
@@ -2003,12 +2035,12 @@
             raise onerr(errmsg)
         return rc
 
-    def _runsystem(self, cmd, environ, cwd, out):
+    def _runsystem(self, cmd: bytes, environ, cwd: Optional[bytes], out) -> int:
         """actually execute the given shell command (can be overridden by
         extensions like chg)"""
         return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
 
-    def traceback(self, exc=None, force=False):
+    def traceback(self, exc=None, force: bool = False):
         """print exception traceback if traceback printing enabled or forced.
         only to call in exception handler. returns true if traceback
         printed."""
@@ -2054,7 +2086,7 @@
         )
 
     @util.propertycache
-    def _progbar(self):
+    def _progbar(self) -> Optional[progress.progbar]:
         """setup the progbar singleton to the ui object"""
         if (
             self.quiet
@@ -2065,14 +2097,16 @@
             return None
         return getprogbar(self)
 
-    def _progclear(self):
+    def _progclear(self) -> None:
         """clear progress bar output if any. use it before any output"""
         if not haveprogbar():  # nothing loaded yet
             return
         if self._progbar is not None and self._progbar.printed:
             self._progbar.clear()
 
-    def makeprogress(self, topic, unit=b"", total=None):
+    def makeprogress(
+        self, topic: bytes, unit: bytes = b"", total: Optional[int] = None
+    ) -> scmutil.progress:
         """Create a progress helper for the specified topic"""
         if getattr(self._fmsgerr, 'structured', False):
             # channel for machine-readable output with metadata, just send
@@ -2104,7 +2138,7 @@
         """Returns a logger of the given name; or None if not registered"""
         return self._loggers.get(name)
 
-    def setlogger(self, name, logger):
+    def setlogger(self, name, logger) -> None:
         """Install logger which can be identified later by the given name
 
         More than one loggers can be registered. Use extension or module
@@ -2112,7 +2146,7 @@
         """
         self._loggers[name] = logger
 
-    def log(self, event, msgfmt, *msgargs, **opts):
+    def log(self, event, msgfmt, *msgargs, **opts) -> None:
         """hook for logging facility extensions
 
         event should be a readily-identifiable subsystem, which will
@@ -2139,7 +2173,7 @@
         finally:
             self._loggers = registeredloggers
 
-    def label(self, msg, label):
+    def label(self, msg: bytes, label: bytes) -> bytes:
         """style msg based on supplied label
 
         If some color mode is enabled, this will add the necessary control
@@ -2153,7 +2187,9 @@
             return color.colorlabel(self, msg, label)
         return msg
 
-    def develwarn(self, msg, stacklevel=1, config=None):
+    def develwarn(
+        self, msg: bytes, stacklevel: int = 1, config: Optional[bytes] = None
+    ) -> None:
         """issue a developer warning message
 
         Use 'stacklevel' to report the offender some layers further up in the
@@ -2185,7 +2221,12 @@
             del curframe
             del calframe
 
-    def deprecwarn(self, msg, version, stacklevel=2):
+    def deprecwarn(
+        self,
+        msg: bytes,
+        version: bytes,
+        stacklevel: int = 2,
+    ) -> None:
         """issue a deprecation warning
 
         - msg: message explaining what is deprecated and how to upgrade,
@@ -2209,7 +2250,7 @@
         return self._exportableenviron
 
     @contextlib.contextmanager
-    def configoverride(self, overrides, source=b""):
+    def configoverride(self, overrides: _ConfigItems, source: bytes = b""):
         """Context manager for temporary config overrides
         `overrides` must be a dict of the following structure:
         {(section, name) : value}"""
@@ -2227,7 +2268,7 @@
             if (b'ui', b'quiet') in overrides:
                 self.fixconfig(section=b'ui')
 
-    def estimatememory(self):
+    def estimatememory(self) -> Optional[int]:
         """Provide an estimate for the available system memory in Bytes.
 
         This can be overriden via ui.available-memory. It returns None, if
@@ -2246,10 +2287,10 @@
 
 # we instantiate one globally shared progress bar to avoid
 # competing progress bars when multiple UI objects get created
-_progresssingleton = None
+_progresssingleton: Optional[progress.progbar] = None
 
 
-def getprogbar(ui):
+def getprogbar(ui: ui) -> progress.progbar:
     global _progresssingleton
     if _progresssingleton is None:
         # passing 'ui' object to the singleton is fishy,
@@ -2258,11 +2299,11 @@
     return _progresssingleton
 
 
-def haveprogbar():
+def haveprogbar() -> bool:
     return _progresssingleton is not None
 
 
-def _selectmsgdests(ui):
+def _selectmsgdests(ui: ui):
     name = ui.config(b'ui', b'message-output')
     if name == b'channel':
         if ui.fmsg:
@@ -2278,7 +2319,7 @@
     raise error.Abort(b'invalid ui.message-output destination: %s' % name)
 
 
-def _writemsgwith(write, dest, *args, **opts):
+def _writemsgwith(write, dest, *args: bytes, **opts: _MsgOpts) -> None:
     """Write ui message with the given ui._write*() function
 
     The specified message type is translated to 'ui.<type>' label if the dest
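
The `prompt`/`_prompt` overloads above are declared under `pycompat.TYPE_CHECKING`, so the class keeps a single runtime implementation while pytype can tie the return type to the `default` argument: a `bytes` default yields `bytes`, a `None` default yields `Optional[bytes]`. A minimal self-contained sketch of the pattern (this `Prompter` class is illustrative, not Mercurial API):

    from typing import TYPE_CHECKING, Optional, overload

    class Prompter:
        if TYPE_CHECKING:

            @overload
            def prompt(self, msg: bytes, default: bytes) -> bytes:
                ...

            @overload
            def prompt(self, msg: bytes, default: None) -> Optional[bytes]:
                ...

        def prompt(self, msg, default=b'y'):
            # only this definition exists at runtime; the overloads are erased
            return default

    p = Prompter()
    r1 = p.prompt(b'continue?', b'n')  # checker infers: bytes
    r2 = p.prompt(b'continue?', None)  # checker infers: Optional[bytes]
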
--- a/mercurial/unionrepo.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/unionrepo.py	Thu Mar 02 22:45:44 2023 +0100
@@ -113,7 +113,7 @@
             self.bundlerevs.add(n)
             n += 1
 
-    def _chunk(self, rev):
+    def _chunk(self, rev, df=None):
         if rev <= self.repotiprev:
             return revlog.revlog._chunk(self, rev)
         return self.revlog2._chunk(self.node(rev))
@@ -146,7 +146,19 @@
             func = super(unionrevlog, self)._revisiondata
         return func(node, _df=_df, raw=raw)
 
-    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
+    def addrevision(
+        self,
+        text,
+        transaction,
+        link,
+        p1,
+        p2,
+        cachedelta=None,
+        node=None,
+        flags=revlog.REVIDX_DEFAULT_FLAGS,
+        deltacomputer=None,
+        sidedata=None,
+    ):
         raise NotImplementedError
 
     def addgroup(
@@ -157,7 +169,8 @@
         alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
-        maybemissingparents=False,
+        debug_info=None,
+        delta_base_reuse_policy=None,
     ):
         raise NotImplementedError
 
@@ -257,8 +270,8 @@
     def cancopy(self):
         return False
 
-    def peer(self):
-        return unionpeer(self)
+    def peer(self, path=None):
+        return unionpeer(self, path=path)
 
     def getcwd(self):
         return encoding.getcwd()  # always outside the repo
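
The widened `addrevision()` and `addgroup()` signatures above still only raise `NotImplementedError`, but they now mirror the revlog base class, so callers typed against the base interface can pass any of its keyword arguments without a checker complaint. A toy sketch of the principle (these classes are illustrative, not Mercurial's):

    class BaseLog:
        def addrevision(self, text, transaction, link, p1, p2,
                        cachedelta=None, node=None, flags=0):
            raise NotImplementedError


    class ReadOnlyLog(BaseLog):
        # Keep the override's signature identical to the base class even
        # though it always raises: callers holding a BaseLog may
        # legitimately pass any of the optional arguments.
        def addrevision(self, text, transaction, link, p1, p2,
                        cachedelta=None, node=None, flags=0):
            raise NotImplementedError
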
--- a/mercurial/util.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/util.py	Thu Mar 02 22:45:44 2023 +0100
@@ -60,6 +60,7 @@
 
 if pycompat.TYPE_CHECKING:
     from typing import (
+        Iterable,
         Iterator,
         List,
         Optional,
@@ -642,12 +643,12 @@
     ``read()`` and ``readline()``.
     """
 
-    def _fillbuffer(self):
-        res = super(observedbufferedinputpipe, self)._fillbuffer()
+    def _fillbuffer(self, size=_chunksize):
+        res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
 
         fn = getattr(self._input._observer, 'osread', None)
         if fn:
-            fn(res, _chunksize)
+            fn(res, size)
 
         return res
 
@@ -2542,6 +2543,7 @@
         # delegated methods
         self.read = self._fp.read
         self.write = self._fp.write
+        self.writelines = self._fp.writelines
         self.seek = self._fp.seek
         self.tell = self._fp.tell
         self.fileno = self._fp.fileno
@@ -2909,7 +2911,7 @@
 
 
 def iterlines(iterator):
-    # type: (Iterator[bytes]) -> Iterator[bytes]
+    # type: (Iterable[bytes]) -> Iterator[bytes]
     for chunk in iterator:
         for line in chunk.splitlines():
             yield line
@@ -3212,10 +3214,7 @@
 
     The passed argument is anything that has a ``.read(N)`` method.
 
-    >>> try:
-    ...     from StringIO import StringIO as BytesIO
-    ... except ImportError:
-    ...     from io import BytesIO
+    >>> from io import BytesIO
     >>> uvarintdecodestream(BytesIO(b'\\x00'))
     0
     >>> uvarintdecodestream(BytesIO(b'\\x01'))
--- a/mercurial/utils/procutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/utils/procutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -18,6 +18,10 @@
 import threading
 import time
 
+from typing import (
+    BinaryIO,
+)
+
 from ..i18n import _
 from ..pycompat import (
     getattr,
@@ -29,6 +33,7 @@
     error,
     policy,
     pycompat,
+    typelib,
 )
 
 # Import like this to keep import-checker happy
@@ -118,8 +123,8 @@
     return stream
 
 
-class WriteAllWrapper:
-    def __init__(self, orig):
+class WriteAllWrapper(typelib.BinaryIO_Proxy):
+    def __init__(self, orig: BinaryIO):
         self.orig = orig
 
     def __getattr__(self, attr):
@@ -580,7 +585,7 @@
     return _gethgcmd()
 
 
-def rundetached(args, condfn):
+def rundetached(args, condfn) -> int:
     """Execute the argument list in a detached process.
 
     condfn is a callable which is called repeatedly and should return
@@ -616,6 +621,12 @@
         if prevhandler is not None:
             signal.signal(signal.SIGCHLD, prevhandler)
 
+        # pytype seems to get confused by not having a return in the finally
+        # block, and thinks the return value should be Optional[int] here.  It
+        # appears to be https://github.com/google/pytype/issues/938, without
+        # the `with` clause.
+        pass  # pytype: disable=bad-return-type
+
 
 @contextlib.contextmanager
 def uninterruptible(warn):
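
`WriteAllWrapper` now inherits from `typelib.BinaryIO_Proxy`, the alias introduced at the top of this patch: it is `BinaryIO` while type checking and plain `object` at runtime, so pytype sees a full binary stream without any runtime ABC overhead. A self-contained sketch of the pattern (the retry loop below is a simplified stand-in for the real `write()` body):

    import io
    from typing import TYPE_CHECKING, BinaryIO

    if TYPE_CHECKING:
        BinaryIO_Proxy = BinaryIO  # checkers see a real binary stream type
    else:
        BinaryIO_Proxy = object    # the runtime class stays plain

    class WriteAllWrapper(BinaryIO_Proxy):
        def __init__(self, orig):
            self.orig = orig

        def __getattr__(self, attr):
            # anything not defined here is delegated to the wrapped stream
            return getattr(self.orig, attr)

        def write(self, data):
            # keep writing until the whole buffer has been flushed out
            written = self.orig.write(data)
            while written < len(data):
                written += self.orig.write(data[written:])
            return written

    w = WriteAllWrapper(io.BytesIO())
    assert w.write(b'abc') == 3
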
--- a/mercurial/utils/storageutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/utils/storageutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -190,9 +190,9 @@
 
     ``fileid`` can be:
 
-    * A 20 or 32 byte binary node.
+    * A binary node of appropriate size (e.g. 20 or 32 bytes).
     * An integer revision number
-    * A 40 or 64 byte hex node.
+    * A hex node of appropriate size (e.g. 40 or 64 bytes).
     * A bytes that can be parsed as an integer representing a revision number.
 
     ``identifier`` is used to populate ``error.LookupError`` with an identifier
@@ -208,14 +208,14 @@
                 b'%d' % fileid, identifier, _(b'no match found')
             )
 
-    if len(fileid) in (20, 32):
+    if len(fileid) == len(store.nullid):
         try:
             store.rev(fileid)
             return fileid
         except error.LookupError:
             pass
 
-    if len(fileid) in (40, 64):
+    if len(fileid) == 2 * len(store.nullid):
         try:
             rawnode = bin(fileid)
             store.rev(rawnode)
@@ -305,6 +305,7 @@
     revisiondata=False,
     assumehaveparentrevisions=False,
     sidedata_helpers=None,
+    debug_info=None,
 ):
     """Generic implementation of ifiledata.emitrevisions().
 
@@ -370,6 +371,10 @@
     ``sidedata_helpers`` (optional)
         If not None, means that sidedata should be included.
         See `revlogutil.sidedata.get_sidedata_helpers`.
+
+    ``debug_info`` (optional)
+        An optional dictionary to gather information about the bundling
+        process (if present, see config: debug.bundling.stats).
     """
 
     fnode = store.node
@@ -407,31 +412,59 @@
         if rev == nullrev:
             continue
 
+        debug_delta_source = None
+        if debug_info is not None:
+            debug_info['revision-total'] += 1
+
         node = fnode(rev)
         p1rev, p2rev = parents(rev)
 
+        if debug_info is not None:
+            if p1rev != p2rev and p1rev != nullrev and p2rev != nullrev:
+                debug_info['merge-total'] += 1
+
         if deltaparentfn:
             deltaparentrev = deltaparentfn(rev)
+            if debug_info is not None:
+                if deltaparentrev == nullrev:
+                    debug_info['available-full'] += 1
+                else:
+                    debug_info['available-delta'] += 1
+
         else:
             deltaparentrev = nullrev
 
         # Forced delta against previous mode.
         if deltamode == repository.CG_DELTAMODE_PREV:
+            if debug_info is not None:
+                debug_delta_source = "prev"
             baserev = prevrev
 
         # We're instructed to send fulltext. Honor that.
         elif deltamode == repository.CG_DELTAMODE_FULL:
+            if debug_info is not None:
+                debug_delta_source = "full"
             baserev = nullrev
         # We're instructed to use p1. Honor that
         elif deltamode == repository.CG_DELTAMODE_P1:
+            if debug_info is not None:
+                debug_delta_source = "p1"
             baserev = p1rev
 
         # There is a delta in storage. We try to use that because it
         # amounts to effectively copying data from storage and is
         # therefore the fastest.
         elif is_usable_base(deltaparentrev):
+            if debug_info is not None:
+                debug_delta_source = "storage"
+            baserev = deltaparentrev
+        elif deltaparentrev == nullrev:
+            if debug_info is not None:
+                debug_delta_source = "storage"
             baserev = deltaparentrev
         else:
+            if deltaparentrev != nullrev and debug_info is not None:
+                debug_info['denied-base-not-available'] += 1
             # No guarantee the receiver has the delta parent, or Storage has a
             # fulltext revision.
             #
@@ -441,22 +474,37 @@
             # be close to this revision content.
             #
             # note: we could optimize between p1 and p2 in merges cases.
             if is_usable_base(p1rev):
+                if debug_info is not None:
+                    debug_delta_source = "p1"
                 baserev = p1rev
             # if p1 was not an option, try p2
             elif is_usable_base(p2rev):
+                if debug_info is not None:
+                    debug_delta_source = "p2"
                 baserev = p2rev
             # Send delta against prev in despair
             #
             # using the closest available ancestors first might be better?
             elif prevrev is not None:
+                if debug_info is not None:
+                    debug_delta_source = "prev"
                 baserev = prevrev
             else:
+                if debug_info is not None:
+                    debug_delta_source = "full"
                 baserev = nullrev
 
         # But we can't actually use our chosen delta base for whatever
         # reason. Reset to fulltext.
-        if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
+        if (
+            baserev != nullrev
+            and candeltafn is not None
+            and not candeltafn(baserev, rev)
+        ):
+            if debug_info is not None:
+                debug_delta_source = "full"
+                debug_info['denied-delta-candeltafn'] += 1
             baserev = nullrev
 
         revision = None
@@ -468,6 +516,9 @@
                 try:
                     revision = store.rawdata(node)
                 except error.CensoredNodeError as e:
+                    if debug_info is not None:
+                        debug_delta_source = "full"
+                        debug_info['denied-delta-not-available'] += 1
                     revision = e.tombstone
 
                 if baserev != nullrev:
@@ -479,12 +530,46 @@
             elif (
                 baserev == nullrev and deltamode != repository.CG_DELTAMODE_PREV
             ):
+                if debug_info is not None:
+                    debug_info['computed-delta'] += 1  # close enough
+                    debug_info['delta-full'] += 1
                 revision = store.rawdata(node)
                 emitted.add(rev)
             else:
                 if revdifffn:
+                    if debug_info is not None:
+                        if debug_delta_source == "full":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-full'] += 1
+                        elif debug_delta_source == "prev":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-prev'] += 1
+                        elif debug_delta_source == "p1":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-p1'] += 1
+                        elif debug_delta_source == "storage":
+                            debug_info['reused-storage-delta'] += 1
+                        else:
+                            assert False, 'unreachable'
+
                     delta = revdifffn(baserev, rev)
                 else:
+                    if debug_info is not None:
+                        if debug_delta_source == "full":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-full'] += 1
+                        elif debug_delta_source == "prev":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-prev'] += 1
+                        elif debug_delta_source == "p1":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-p1'] += 1
+                        elif debug_delta_source == "storage":
+                            # seems quite unlikely to happen
+                            debug_info['computed-delta'] += 1
+                            debug_info['reused-storage-delta'] += 1
+                        else:
+                            assert False, 'unreachable'
                     delta = mdiff.textdiff(
                         store.rawdata(baserev), store.rawdata(rev)
                     )
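
The `debug_info` argument threaded through `emitrevisions()` above is a plain dictionary of counters that the function increments as it picks delta bases. A hedged sketch of how a caller might seed and read it; the `new_bundling_stats()` helper is hypothetical, only the key names are taken from the code above:

    def new_bundling_stats():
        # hypothetical helper: pre-seed every counter touched above with 0
        keys = [
            'revision-total', 'merge-total',
            'available-full', 'available-delta',
            'denied-base-not-available', 'denied-delta-candeltafn',
            'denied-delta-not-available',
            'reused-storage-delta', 'computed-delta',
            'delta-full', 'delta-against-prev', 'delta-against-p1',
        ]
        return {k: 0 for k in keys}

    debug_info = new_bundling_stats()
    # ... pass debug_info=debug_info into emitrevisions(), then report, e.g.
    # the share of deltas reused from storage vs. recomputed on the fly:
    # debug_info['reused-storage-delta'] / debug_info['revision-total']
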
--- a/mercurial/utils/stringutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/utils/stringutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -14,6 +14,11 @@
 import textwrap
 import types
 
+from typing import (
+    Optional,
+    overload,
+)
+
 from ..i18n import _
 from ..thirdparty import attr
 
@@ -30,6 +35,16 @@
 regexbytesescapemap = {i: (b'\\' + i) for i in _respecial}
 
 
+@overload
+def reescape(pat: bytes) -> bytes:
+    ...
+
+
+@overload
+def reescape(pat: str) -> str:
+    ...
+
+
 def reescape(pat):
     """Drop-in replacement for re.escape."""
     # NOTE: it is intentional that this works on unicodes and not
@@ -45,12 +60,12 @@
     return pat.encode('latin1')
 
 
-def pprint(o, bprefix=False, indent=0, level=0):
+def pprint(o, bprefix: bool = False, indent: int = 0, level: int = 0) -> bytes:
     """Pretty print an object."""
     return b''.join(pprintgen(o, bprefix=bprefix, indent=indent, level=level))
 
 
-def pprintgen(o, bprefix=False, indent=0, level=0):
+def pprintgen(o, bprefix: bool = False, indent: int = 0, level: int = 0):
     """Pretty print an object to a generator of atoms.
 
     ``bprefix`` is a flag influencing whether bytestrings are preferred with
@@ -250,7 +265,7 @@
         yield pycompat.byterepr(o)
 
 
-def prettyrepr(o):
+def prettyrepr(o) -> bytes:
     """Pretty print a representation of a possibly-nested object"""
     lines = []
     rs = pycompat.byterepr(o)
@@ -281,7 +296,7 @@
     return b'\n'.join(b'  ' * l + s for l, s in lines)
 
 
-def buildrepr(r):
+def buildrepr(r) -> bytes:
     """Format an optional printable representation from unexpanded bits
 
     ========  =================================
@@ -305,12 +320,12 @@
         return pprint(r)
 
 
-def binary(s):
+def binary(s: bytes) -> bool:
     """return true if a string is binary data"""
     return bool(s and b'\0' in s)
 
 
-def _splitpattern(pattern):
+def _splitpattern(pattern: bytes):
     if pattern.startswith(b're:'):
         return b're', pattern[3:]
     elif pattern.startswith(b'literal:'):
@@ -318,7 +333,7 @@
     return b'literal', pattern
 
 
-def stringmatcher(pattern, casesensitive=True):
+def stringmatcher(pattern: bytes, casesensitive: bool = True):
     """
     accepts a string, possibly starting with 're:' or 'literal:' prefix.
     returns the matcher name, pattern, and matcher function.
@@ -379,7 +394,7 @@
     raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
 
 
-def substringregexp(pattern, flags=0):
+def substringregexp(pattern: bytes, flags: int = 0):
     """Build a regexp object from a string pattern possibly starting with
     're:' or 'literal:' prefix.
 
@@ -431,7 +446,7 @@
     raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
 
 
-def shortuser(user):
+def shortuser(user: bytes) -> bytes:
     """Return a short representation of a user name or email address."""
     f = user.find(b'@')
     if f >= 0:
@@ -448,7 +463,7 @@
     return user
 
 
-def emailuser(user):
+def emailuser(user: bytes) -> bytes:
     """Return the user portion of an email address."""
     f = user.find(b'@')
     if f >= 0:
@@ -459,7 +474,7 @@
     return user
 
 
-def email(author):
+def email(author: bytes) -> bytes:
     '''get email of author.'''
     r = author.find(b'>')
     if r == -1:
@@ -467,7 +482,7 @@
     return author[author.find(b'<') + 1 : r]
 
 
-def person(author):
+def person(author: bytes) -> bytes:
     """Returns the name before an email address,
     interpreting it as per RFC 5322
 
@@ -612,7 +627,7 @@
     return mailmap
 
 
-def mapname(mailmap, author):
+def mapname(mailmap, author: bytes) -> bytes:
     """Returns the author field according to the mailmap cache, or
     the original author field.
 
@@ -663,7 +678,7 @@
 _correctauthorformat = remod.compile(br'^[^<]+\s<[^<>]+@[^<>]+>$')
 
 
-def isauthorwellformed(author):
+def isauthorwellformed(author: bytes) -> bool:
     """Return True if the author field is well formed
     (ie "Contributor Name <contrib@email.dom>")
 
@@ -685,7 +700,7 @@
     return _correctauthorformat.match(author) is not None
 
 
-def firstline(text):
+def firstline(text: bytes) -> bytes:
     """Return the first line of the input"""
     # Try to avoid running splitlines() on the whole string
     i = text.find(b'\n')
@@ -697,21 +712,26 @@
         return b''
 
 
-def ellipsis(text, maxlength=400):
+def ellipsis(text: bytes, maxlength: int = 400) -> bytes:
     """Trim string to at most maxlength (default: 400) columns in display."""
     return encoding.trim(text, maxlength, ellipsis=b'...')
 
 
-def escapestr(s):
+def escapestr(s: bytes) -> bytes:
+    # "bytes" is also a typing shortcut for bytes, bytearray, and memoryview
     if isinstance(s, memoryview):
         s = bytes(s)
     # call underlying function of s.encode('string_escape') directly for
     # Python 3 compatibility
+    # pytype: disable=bad-return-type
     return codecs.escape_encode(s)[0]  # pytype: disable=module-attr
+    # pytype: enable=bad-return-type
 
 
-def unescapestr(s):
+def unescapestr(s: bytes) -> bytes:
+    # pytype: disable=bad-return-type
     return codecs.escape_decode(s)[0]  # pytype: disable=module-attr
+    # pytype: enable=bad-return-type
 
 
 def forcebytestr(obj):
@@ -724,7 +744,7 @@
         return pycompat.bytestr(encoding.strtolocal(str(obj)))
 
 
-def uirepr(s):
+def uirepr(s: bytes) -> bytes:
     # Avoid double backslash in Windows path repr()
     return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
 
@@ -838,7 +858,9 @@
     return tw(**kwargs)
 
 
-def wrap(line, width, initindent=b'', hangindent=b''):
+def wrap(
+    line: bytes, width: int, initindent: bytes = b'', hangindent: bytes = b''
+) -> bytes:
     maxindent = max(len(hangindent), len(initindent))
     if width <= maxindent:
         # adjust for weird terminal size
@@ -875,7 +897,7 @@
 }
 
 
-def parsebool(s):
+def parsebool(s: bytes) -> Optional[bool]:
     """Parse s into a boolean.
 
     If s is not a valid boolean, returns None.
@@ -883,7 +905,8 @@
     return _booleans.get(s.lower(), None)
 
 
-def parselist(value):
+# TODO: make arg mandatory (and fix code below?)
+def parselist(value: Optional[bytes]):
     """parse a configuration value as a list of comma/space separated strings
 
     >>> parselist(b'this,is "a small" ,test')
@@ -973,7 +996,7 @@
     return result or []
 
 
-def evalpythonliteral(s):
+def evalpythonliteral(s: bytes):
     """Evaluate a string containing a Python literal expression"""
     # We could backport our tokenizer hack to rewrite '' to u'' if we want
     return ast.literal_eval(s.decode('latin1'))
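
The paired `@overload` declarations for `reescape()` tell the checker the function is type-preserving (bytes in, bytes out; str in, str out) while the runtime keeps one untyped body. The same shape in a self-contained toy (`upperfirst` is illustrative, not part of stringutil):

    from typing import overload

    @overload
    def upperfirst(s: bytes) -> bytes: ...
    @overload
    def upperfirst(s: str) -> str: ...

    def upperfirst(s):
        # one runtime implementation; the overloads above only tell the
        # checker that the return type matches the argument type
        return s[:1].upper() + s[1:]

    assert upperfirst(b'hg') == b'Hg'
    assert upperfirst('hg') == 'Hg'
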
--- a/mercurial/utils/urlutil.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/utils/urlutil.py	Thu Mar 02 22:45:44 2023 +0100
@@ -24,6 +24,10 @@
     stringutil,
 )
 
+from ..revlogutils import (
+    constants as revlog_constants,
+)
+
 
 if pycompat.TYPE_CHECKING:
     from typing import (
@@ -241,7 +245,8 @@
         u.user = self.user
         u.passwd = self.passwd
         u.host = self.host
+        u.port = self.port
         u.path = self.path
         u.query = self.query
         u.fragment = self.fragment
         u._localpath = self._localpath
@@ -480,10 +484,10 @@
     if not dests:
         if b'default-push' in ui.paths:
             for p in ui.paths[b'default-push']:
-                yield p
+                yield p.get_push_variant()
         elif b'default' in ui.paths:
             for p in ui.paths[b'default']:
-                yield p
+                yield p.get_push_variant()
         else:
             raise error.ConfigError(
                 _(b'default repository not configured!'),
@@ -493,14 +497,14 @@
         for dest in dests:
             if dest in ui.paths:
                 for p in ui.paths[dest]:
-                    yield p
+                    yield p.get_push_variant()
             else:
                 path = try_path(ui, dest)
                 if path is None:
                     msg = _(b'repository %s does not exist')
                     msg %= dest
                     raise error.RepoError(msg)
-                yield path
+                yield path.get_push_variant()
 
 
 def get_pull_paths(repo, ui, sources):
@@ -522,8 +526,6 @@
     This is useful for command and action that does not support multiple
     destination (yet).
 
-    Note that for now, we cannot get multiple destination so this function is "trivial".
-
     The `action` parameter will be used for the error message.
     """
     if dest is None:
@@ -544,80 +546,61 @@
     return dests[0]
 
 
-def get_unique_pull_path(action, repo, ui, source=None, default_branches=()):
+def get_unique_pull_path_obj(action, ui, source=None):
     """return a unique `(path, branch)` or abort if multiple are found
 
     This is useful for command and action that does not support multiple
     destination (yet).
 
-    Note that for now, we cannot get multiple destination so this function is "trivial".
+    The `action` parameter will be used for the error message.
 
-    The `action` parameter will be used for the error message.
+    note: Ideally, this function would be called `get_unique_pull_path` to
+    mirror the `get_unique_push_path`, but the name was already taken.
     """
-    urls = []
-    if source is None:
-        if b'default' in ui.paths:
-            urls.extend(p.rawloc for p in ui.paths[b'default'])
-        else:
-            # XXX this is the historical default behavior, but that is not
-            # great, consider breaking BC on this.
-            urls.append(b'default')
-    else:
-        if source in ui.paths:
-            urls.extend(p.rawloc for p in ui.paths[source])
-        else:
-            # Try to resolve as a local path or URI.
-            path = try_path(ui, source)
-            if path is not None:
-                urls.append(path.rawloc)
-            else:
-                urls.append(source)
-    if len(urls) != 1:
+    sources = []
+    if source is not None:
+        sources.append(source)
+
+    pull_paths = list(get_pull_paths(None, ui, sources=sources))
+    path_count = len(pull_paths)
+    if path_count != 1:
         if source is None:
             msg = _(
                 b"default path points to %d urls while %s only supports one"
             )
-            msg %= (len(urls), action)
+            msg %= (path_count, action)
         else:
             msg = _(b"path points to %d urls while %s only supports one: %s")
-            msg %= (len(urls), action, source)
+            msg %= (path_count, action, source)
         raise error.Abort(msg)
-    return parseurl(urls[0], default_branches)
+    return pull_paths[0]
+
+
+def get_unique_pull_path(action, repo, ui, source=None, default_branches=()):
+    """return a unique `(url, branch)` or abort if multiple are found
+
+    See `get_unique_pull_path_obj` for details.
+    """
+    path = get_unique_pull_path_obj(action, ui, source=source)
+    return parseurl(path.rawloc, default_branches)
 
 
-def get_clone_path(ui, source, default_branches=()):
-    """return the `(origsource, path, branch)` selected as clone source"""
-    urls = []
-    if source is None:
-        if b'default' in ui.paths:
-            urls.extend(p.rawloc for p in ui.paths[b'default'])
-        else:
-            # XXX this is the historical default behavior, but that is not
-            # great, consider breaking BC on this.
-            urls.append(b'default')
-    else:
-        if source in ui.paths:
-            urls.extend(p.rawloc for p in ui.paths[source])
-        else:
-            # Try to resolve as a local path or URI.
-            path = try_path(ui, source)
-            if path is not None:
-                urls.append(path.rawloc)
-            else:
-                urls.append(source)
-    if len(urls) != 1:
-        if source is None:
-            msg = _(
-                b"default path points to %d urls while only one is supported"
-            )
-            msg %= len(urls)
-        else:
-            msg = _(b"path points to %d urls while only one is supported: %s")
-            msg %= (len(urls), source)
-        raise error.Abort(msg)
-    url = urls[0]
-    clone_path, branch = parseurl(url, default_branches)
-    return url, clone_path, branch
+def get_clone_path_obj(ui, source):
+    """return the `(origsource, url, branch)` selected as clone source"""
+    if source == b'':
+        return None
+    return get_unique_pull_path_obj(b'clone', ui, source=source)
+
+
+def get_clone_path(ui, source, default_branches=None):
+    """return the `(origsource, url, branch)` selected as clone source"""
+    path = get_clone_path_obj(ui, source)
+    if path is None:
+        return (b'', b'', (None, default_branches))
+    if default_branches is None:
+        default_branches = []
+    branches = (path.branch, default_branches)
+    return path.rawloc, path.loc, branches
 
 
 def parseurl(path, branches=None):
@@ -673,43 +656,6 @@
                 new_paths.extend(_chain_path(p, ui, self))
             self[name] = new_paths
 
-    def getpath(self, ui, name, default=None):
-        """Return a ``path`` from a string, falling back to default.
-
-        ``name`` can be a named path or locations. Locations are filesystem
-        paths or URIs.
-
-        Returns None if ``name`` is not a registered path, a URI, or a local
-        path to a repo.
-        """
-        msg = b'getpath is deprecated, use `get_*` functions from urlutil'
-        ui.deprecwarn(msg, b'6.0')
-        # Only fall back to default if no path was requested.
-        if name is None:
-            if not default:
-                default = ()
-            elif not isinstance(default, (tuple, list)):
-                default = (default,)
-            for k in default:
-                try:
-                    return self[k][0]
-                except KeyError:
-                    continue
-            return None
-
-        # Most likely empty string.
-        # This may need to raise in the future.
-        if not name:
-            return None
-        if name in self:
-            return self[name][0]
-        else:
-            # Try to resolve as a local path or URI.
-            path = try_path(ui, name)
-            if path is None:
-                raise error.RepoError(_(b'repository %s does not exist') % name)
-            return path.rawloc
-
 
 _pathsuboptions = {}
 
@@ -736,7 +682,7 @@
     return register
 
 
-@pathsuboption(b'pushurl', b'pushloc')
+@pathsuboption(b'pushurl', b'_pushloc')
 def pushurlpathoption(ui, path, value):
     u = url(value)
     # Actually require a URL.
@@ -788,6 +734,29 @@
     return value
 
 
+DELTA_REUSE_POLICIES = {
+    b'default': None,
+    b'try-base': revlog_constants.DELTA_BASE_REUSE_TRY,
+    b'no-reuse': revlog_constants.DELTA_BASE_REUSE_NO,
+    b'forced': revlog_constants.DELTA_BASE_REUSE_FORCE,
+}
+
+
+@pathsuboption(b'pulled-delta-reuse-policy', b'delta_reuse_policy')
+def delta_reuse_policy(ui, path, value):
+    if value not in DELTA_REUSE_POLICIES:
+        path_name = path.name
+        if path_name is None:
+            # this is an "anonymous" path, config comes from the global one
+            path_name = b'*'
+        msg = _(
+            b'(paths.%s:pulled-delta-reuse-policy has unknown value: "%s")\n'
+        )
+        msg %= (path_name, value)
+        ui.warn(msg)
+    return DELTA_REUSE_POLICIES.get(value)
+
+
 @pathsuboption(b'multi-urls', b'multi_urls')
 def multiurls_pathoption(ui, path, value):
     res = stringutil.parsebool(value)
@@ -848,7 +817,8 @@
         ``ui`` is the ``ui`` instance the path is coming from.
         ``name`` is the symbolic name of the path.
         ``rawloc`` is the raw location, as defined in the config.
-        ``pushloc`` is the raw locations pushes should be made to.
+        ``_pushloc`` is the raw locations pushes should be made to.
+                     (see the `get_push_variant` method)
 
         If ``name`` is not defined, we require that the location be a) a local
         filesystem path with a .hg directory or b) a URL. If not,
@@ -864,21 +834,11 @@
         if not rawloc:
             raise ValueError(b'rawloc must be defined')
 
-        # Locations may define branches via syntax <base>#<branch>.
-        u = url(rawloc)
-        branch = None
-        if u.fragment:
-            branch = u.fragment
-            u.fragment = None
+        self.name = name
 
-        self.url = u
-        # the url from the config/command line before dealing with `path://`
-        self.raw_url = u.copy()
-        self.branch = branch
-
-        self.name = name
-        self.rawloc = rawloc
-        self.loc = b'%s' % u
+        # set by path variants to point to their "non-push" version
+        self.main_path = None
+        self._setup_url(rawloc)
 
         if validate_path:
             self._validate_path()
@@ -892,16 +852,67 @@
 
         self._apply_suboptions(ui, sub_opts)
 
-    def copy(self):
-        """make a copy of this path object"""
+    def _setup_url(self, rawloc):
+        # Locations may define branches via syntax <base>#<branch>.
+        u = url(rawloc)
+        branch = None
+        if u.fragment:
+            branch = u.fragment
+            u.fragment = None
+
+        self.url = u
+        # the url from the config/command line before dealing with `path://`
+        self.raw_url = u.copy()
+        self.branch = branch
+
+        self.rawloc = rawloc
+        self.loc = b'%s' % u
+
+    def copy(self, new_raw_location=None):
+        """make a copy of this path object
+
+        When `new_raw_location` is set, the new path will point to it.
+        This is used by the scheme extension to expand the scheme.
+        """
         new = self.__class__()
         for k, v in self.__dict__.items():
             new_copy = getattr(v, 'copy', None)
             if new_copy is not None:
                 v = new_copy()
             new.__dict__[k] = v
+        if new_raw_location is not None:
+            new._setup_url(new_raw_location)
         return new
 
+    @property
+    def is_push_variant(self):
+        """is this a path variant to be used for pushing"""
+        return self.main_path is not None
+
+    def get_push_variant(self):
+        """get a "copy" of the path, but suitable for pushing
+
+        This means using the value of the `pushurl` option (if any) as the url.
+
+        The original path is available in the `main_path` attribute.
+        """
+        if self.main_path:
+            return self
+        new = self.copy()
+        new.main_path = self
+        if self._pushloc:
+            new._setup_url(self._pushloc)
+        return new
+
+    @property
+    def pushloc(self):
+        """compatibility layer for the deprecated attributes"""
+        from .. import util  # avoid a cycle
+
+        msg = "don't use path.pushloc, use path.get_push_variant()"
+        util.nouideprecwarn(msg, b"6.5")
+        return self._pushloc
+
     def _validate_path(self):
         # When given a raw location but not a symbolic name, validate the
         # location is valid.
--- a/mercurial/verify.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/verify.py	Thu Mar 02 22:45:44 2023 +0100
@@ -15,6 +15,7 @@
 from . import (
     error,
     pycompat,
+    requirements,
     revlog,
     util,
 )
@@ -210,6 +211,12 @@
         self._crosscheckfiles(filelinkrevs, filenodes)
         totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)
 
+        if self.errors:
+            ui.warn(_(b"not checking dirstate because of previous errors\n"))
+            dirstate_errors = 0
+        else:
+            dirstate_errors = self._verify_dirstate()
+
         # final report
         ui.status(
             _(b"checked %d changesets with %d changes to %d files\n")
@@ -225,6 +232,11 @@
                 msg = _(b"(first damaged changeset appears to be %d)\n")
                 msg %= min(self.badrevs)
                 ui.warn(msg)
+            if dirstate_errors:
+                ui.warn(
+                    _(b"dirstate inconsistent with current parent's manifest\n")
+                )
+                ui.warn(_(b"%d dirstate errors\n") % dirstate_errors)
             return 1
         return 0
 
@@ -585,3 +597,25 @@
                 self._warn(_(b"warning: orphan data file '%s'") % f)
 
         return len(files), revisions
+
+    def _verify_dirstate(self):
+        """Check that the dirstate is consistent with the parent's manifest"""
+        repo = self.repo
+        ui = self.ui
+        ui.status(_(b"checking dirstate\n"))
+
+        parent1, parent2 = repo.dirstate.parents()
+        m1 = repo[parent1].manifest()
+        m2 = repo[parent2].manifest()
+        dirstate_errors = 0
+
+        is_narrow = requirements.NARROW_REQUIREMENT in repo.requirements
+        narrow_matcher = repo.narrowmatch() if is_narrow else None
+
+        for err in repo.dirstate.verify(m1, m2, parent1, narrow_matcher):
+            ui.error(err)
+            dirstate_errors += 1
+
+        if dirstate_errors:
+            self.errors += dirstate_errors
+        return dirstate_errors
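
`_verify_dirstate()` treats `dirstate.verify()` as a generator of preformatted error messages and folds the count into the overall error tally. A hedged sketch of that consumption pattern; the `verify()` generator below is a stand-in, not the real dirstate API:

    def verify(m1, m2, tracked):
        # stand-in: one message per file tracked in the dirstate but
        # absent from both parent manifests
        for f in tracked:
            if f not in m1 and f not in m2:
                yield b"%s marked tracked but absent from both parents\n" % f

    errors = 0
    for err in verify({b'a': b'node1'}, {}, [b'a', b'b']):
        errors += 1  # the real code also prints err via ui.error()
    assert errors == 1
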
--- a/mercurial/vfs.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/vfs.py	Thu Mar 02 22:45:44 2023 +0100
@@ -11,6 +11,10 @@
 import stat
 import threading
 
+from typing import (
+    Optional,
+)
+
 from .i18n import _
 from .pycompat import (
     delattr,
@@ -26,7 +30,7 @@
 )
 
 
-def _avoidambig(path, oldstat):
+def _avoidambig(path: bytes, oldstat):
     """Avoid file stat ambiguity forcibly
 
     This function causes copying ``path`` file, if it is owned by
@@ -60,16 +64,17 @@
         '''Prevent instantiation; don't call this from subclasses.'''
         raise NotImplementedError('attempted instantiating ' + str(type(self)))
 
-    def __call__(self, path, mode=b'rb', **kwargs):
+    # TODO: type return, which is util.posixfile wrapped by a proxy
+    def __call__(self, path: bytes, mode: bytes = b'rb', **kwargs):
         raise NotImplementedError
 
-    def _auditpath(self, path, mode):
+    def _auditpath(self, path: bytes, mode: bytes):
         raise NotImplementedError
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         raise NotImplementedError
 
-    def tryread(self, path):
+    def tryread(self, path: bytes) -> bytes:
         '''gracefully return an empty string for missing files'''
         try:
             return self.read(path)
@@ -77,7 +82,7 @@
             pass
         return b""
 
-    def tryreadlines(self, path, mode=b'rb'):
+    def tryreadlines(self, path: bytes, mode: bytes = b'rb'):
         '''gracefully return an empty array for missing files'''
         try:
             return self.readlines(path, mode=mode)
@@ -95,57 +100,61 @@
         """
         return self.__call__
 
-    def read(self, path):
+    def read(self, path: bytes) -> bytes:
         with self(path, b'rb') as fp:
             return fp.read()
 
-    def readlines(self, path, mode=b'rb'):
+    def readlines(self, path: bytes, mode: bytes = b'rb'):
         with self(path, mode=mode) as fp:
             return fp.readlines()
 
-    def write(self, path, data, backgroundclose=False, **kwargs):
+    def write(
+        self, path: bytes, data: bytes, backgroundclose=False, **kwargs
+    ) -> int:
         with self(path, b'wb', backgroundclose=backgroundclose, **kwargs) as fp:
             return fp.write(data)
 
-    def writelines(self, path, data, mode=b'wb', notindexed=False):
+    def writelines(
+        self, path: bytes, data: bytes, mode: bytes = b'wb', notindexed=False
+    ) -> None:
         with self(path, mode=mode, notindexed=notindexed) as fp:
             return fp.writelines(data)
 
-    def append(self, path, data):
+    def append(self, path: bytes, data: bytes) -> int:
         with self(path, b'ab') as fp:
             return fp.write(data)
 
-    def basename(self, path):
+    def basename(self, path: bytes) -> bytes:
         """return base element of a path (as os.path.basename would do)
 
         This exists to allow handling of strange encoding if needed."""
         return os.path.basename(path)
 
-    def chmod(self, path, mode):
+    def chmod(self, path: bytes, mode: int) -> None:
         return os.chmod(self.join(path), mode)
 
-    def dirname(self, path):
+    def dirname(self, path: bytes) -> bytes:
         """return dirname element of a path (as os.path.dirname would do)
 
         This exists to allow handling of strange encoding if needed."""
         return os.path.dirname(path)
 
-    def exists(self, path=None):
+    def exists(self, path: Optional[bytes] = None) -> bool:
         return os.path.exists(self.join(path))
 
     def fstat(self, fp):
         return util.fstat(fp)
 
-    def isdir(self, path=None):
+    def isdir(self, path: Optional[bytes] = None) -> bool:
         return os.path.isdir(self.join(path))
 
-    def isfile(self, path=None):
+    def isfile(self, path: Optional[bytes] = None) -> bool:
         return os.path.isfile(self.join(path))
 
-    def islink(self, path=None):
+    def islink(self, path: Optional[bytes] = None) -> bool:
         return os.path.islink(self.join(path))
 
-    def isfileorlink(self, path=None):
+    def isfileorlink(self, path: Optional[bytes] = None) -> bool:
         """return whether path is a regular file or a symlink
 
         Unlike isfile, this doesn't follow symlinks."""
@@ -156,7 +165,7 @@
         mode = st.st_mode
         return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
 
-    def _join(self, *paths):
+    def _join(self, *paths: bytes) -> bytes:
         root_idx = 0
         for idx, p in enumerate(paths):
             if os.path.isabs(p) or p.startswith(self._dir_sep):
@@ -166,41 +175,48 @@
         paths = [p for p in paths if p]
         return self._dir_sep.join(paths)
 
-    def reljoin(self, *paths):
+    def reljoin(self, *paths: bytes) -> bytes:
         """join various elements of a path together (as os.path.join would do)
 
         The vfs base is not injected so that paths stay relative. This exists
         to allow handling of strange encoding if needed."""
         return self._join(*paths)
 
-    def split(self, path):
+    def split(self, path: bytes):
         """split top-most element of a path (as os.path.split would do)
 
         This exists to allow handling of strange encoding if needed."""
         return os.path.split(path)
 
-    def lexists(self, path=None):
+    def lexists(self, path: Optional[bytes] = None) -> bool:
         return os.path.lexists(self.join(path))
 
-    def lstat(self, path=None):
+    def lstat(self, path: Optional[bytes] = None):
         return os.lstat(self.join(path))
 
-    def listdir(self, path=None):
+    def listdir(self, path: Optional[bytes] = None):
         return os.listdir(self.join(path))
 
-    def makedir(self, path=None, notindexed=True):
+    def makedir(self, path: Optional[bytes] = None, notindexed=True):
         return util.makedir(self.join(path), notindexed)
 
-    def makedirs(self, path=None, mode=None):
+    def makedirs(
+        self, path: Optional[bytes] = None, mode: Optional[int] = None
+    ):
         return util.makedirs(self.join(path), mode)
 
-    def makelock(self, info, path):
+    def makelock(self, info, path: bytes):
         return util.makelock(info, self.join(path))
 
-    def mkdir(self, path=None):
+    def mkdir(self, path: Optional[bytes] = None):
         return os.mkdir(self.join(path))
 
-    def mkstemp(self, suffix=b'', prefix=b'tmp', dir=None):
+    def mkstemp(
+        self,
+        suffix: bytes = b'',
+        prefix: bytes = b'tmp',
+        dir: Optional[bytes] = None,
+    ):
         fd, name = pycompat.mkstemp(
             suffix=suffix, prefix=prefix, dir=self.join(dir)
         )
@@ -210,13 +226,13 @@
         else:
             return fd, fname
 
-    def readdir(self, path=None, stat=None, skip=None):
+    def readdir(self, path: Optional[bytes] = None, stat=None, skip=None):
         return util.listdir(self.join(path), stat, skip)
 
-    def readlock(self, path):
+    def readlock(self, path: bytes) -> bytes:
         return util.readlock(self.join(path))
 
-    def rename(self, src, dst, checkambig=False):
+    def rename(self, src: bytes, dst: bytes, checkambig=False):
         """Rename from src to dst
 
         checkambig argument is used with util.filestat, and is useful
@@ -238,18 +254,20 @@
             return ret
         return util.rename(srcpath, dstpath)
 
-    def readlink(self, path):
+    def readlink(self, path: bytes) -> bytes:
         return util.readlink(self.join(path))
 
-    def removedirs(self, path=None):
+    def removedirs(self, path: Optional[bytes] = None):
         """Remove a leaf directory and all empty intermediate ones"""
         return util.removedirs(self.join(path))
 
-    def rmdir(self, path=None):
+    def rmdir(self, path: Optional[bytes] = None):
         """Remove an empty directory."""
         return os.rmdir(self.join(path))
 
-    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
+    def rmtree(
+        self, path: Optional[bytes] = None, ignore_errors=False, forcibly=False
+    ):
         """Remove a directory tree recursively
 
         If ``forcibly``, this tries to remove READ-ONLY files, too.
@@ -272,28 +290,30 @@
             self.join(path), ignore_errors=ignore_errors, onerror=onerror
         )
 
-    def setflags(self, path, l, x):
+    def setflags(self, path: bytes, l: bool, x: bool):
         return util.setflags(self.join(path), l, x)
 
-    def stat(self, path=None):
+    def stat(self, path: Optional[bytes] = None):
         return os.stat(self.join(path))
 
-    def unlink(self, path=None):
+    def unlink(self, path: Optional[bytes] = None):
         return util.unlink(self.join(path))
 
-    def tryunlink(self, path=None):
+    def tryunlink(self, path: Optional[bytes] = None):
         """Attempt to remove a file, ignoring missing file errors."""
         util.tryunlink(self.join(path))
 
-    def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
+    def unlinkpath(
+        self, path: Optional[bytes] = None, ignoremissing=False, rmdir=True
+    ):
         return util.unlinkpath(
             self.join(path), ignoremissing=ignoremissing, rmdir=rmdir
         )
 
-    def utime(self, path=None, t=None):
+    def utime(self, path: Optional[bytes] = None, t=None):
         return os.utime(self.join(path), t)
 
-    def walk(self, path=None, onerror=None):
+    def walk(self, path: Optional[bytes] = None, onerror=None):
         """Yield (dirpath, dirs, files) tuple for each directories under path
 
         ``dirpath`` is relative one from the root of this vfs. This
@@ -360,7 +380,7 @@
 
     def __init__(
         self,
-        base,
+        base: bytes,
         audit=True,
         cacheaudited=False,
         expandpath=False,
@@ -381,7 +401,7 @@
         self.options = {}
 
     @util.propertycache
-    def _cansymlink(self):
+    def _cansymlink(self) -> bool:
         return util.checklink(self.base)
 
     @util.propertycache
@@ -393,7 +413,7 @@
             return
         os.chmod(name, self.createmode & 0o666)
 
-    def _auditpath(self, path, mode):
+    def _auditpath(self, path, mode) -> None:
         if self._audit:
             if os.path.isabs(path) and path.startswith(self.base):
                 path = os.path.relpath(path, self.base)
@@ -402,10 +422,35 @@
                 raise error.Abort(b"%s: %r" % (r, path))
             self.audit(path, mode=mode)
 
+    def isfileorlink_checkdir(
+        self, dircache, path: Optional[bytes] = None
+    ) -> bool:
+        """return True if the path is a regular file or a symlink and
+        the directories along the path are "normal", that is
+        not symlinks or nested hg repositories.
+
+        Ignores the `_audit` setting, and checks the directories regardless.
+        `dircache` is used to cache the directory checks.
+        """
+        try:
+            for prefix in pathutil.finddirs_rev_noroot(util.localpath(path)):
+                if prefix in dircache:
+                    res = dircache[prefix]
+                else:
+                    res = pathutil.pathauditor._checkfs_exists(
+                        self.base, prefix, path
+                    )
+                    dircache[prefix] = res
+                if not res:
+                    return False
+        except (OSError, error.Abort):
+            return False
+        return self.isfileorlink(path)
+
     def __call__(
         self,
-        path,
-        mode=b"r",
+        path: bytes,
+        mode: bytes = b"rb",
         atomictemp=False,
         notindexed=False,
         backgroundclose=False,
@@ -518,7 +563,7 @@
 
         return fp
 
-    def symlink(self, src, dst):
+    def symlink(self, src: bytes, dst: bytes) -> None:
         self.audit(dst)
         linkname = self.join(dst)
         util.tryunlink(linkname)
@@ -538,7 +583,7 @@
         else:
             self.write(dst, src)
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         if path:
             parts = [self.base, path]
             parts.extend(insidef)
@@ -551,7 +596,7 @@
 
 
 class proxyvfs(abstractvfs):
-    def __init__(self, vfs):
+    def __init__(self, vfs: "vfs"):
         self.vfs = vfs
 
     def _auditpath(self, path, mode):
@@ -569,14 +614,14 @@
 class filtervfs(proxyvfs, abstractvfs):
     '''Wrapper vfs for filtering filenames with a function.'''
 
-    def __init__(self, vfs, filter):
+    def __init__(self, vfs: "vfs", filter):
         proxyvfs.__init__(self, vfs)
         self._filter = filter
 
-    def __call__(self, path, *args, **kwargs):
+    def __call__(self, path: bytes, *args, **kwargs):
         return self.vfs(self._filter(path), *args, **kwargs)
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         if path:
             return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
         else:
@@ -589,15 +634,15 @@
 class readonlyvfs(proxyvfs):
     '''Wrapper vfs preventing any writing.'''
 
-    def __init__(self, vfs):
+    def __init__(self, vfs: "vfs"):
         proxyvfs.__init__(self, vfs)
 
-    def __call__(self, path, mode=b'r', *args, **kw):
+    def __call__(self, path: bytes, mode: bytes = b'rb', *args, **kw):
         if mode not in (b'r', b'rb'):
             raise error.Abort(_(b'this vfs is read only'))
         return self.vfs(path, mode, *args, **kw)
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         return self.vfs.join(path, *insidef)
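
As a usage sketch for the new `isfileorlink_checkdir()` helper (the caller-side names here are hypothetical): a single `dircache` dict is shared across many lookups, so each directory prefix is audited against the filesystem at most once:

# wvfs: a working-directory vfs; candidate_paths: repo-relative byte paths.
dircache = {}
regular = [
    p for p in candidate_paths if wvfs.isfileorlink_checkdir(dircache, p)
]
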
 
 
--- a/mercurial/win32.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/win32.py	Thu Mar 02 22:45:44 2023 +0100
@@ -14,6 +14,13 @@
 import random
 import subprocess
 
+from typing import (
+    List,
+    NoReturn,
+    Optional,
+    Tuple,
+)
+
 from . import (
     encoding,
     pycompat,
@@ -356,7 +363,7 @@
 _kernel32.PeekNamedPipe.restype = _BOOL
 
 
-def _raiseoserror(name):
+def _raiseoserror(name: bytes) -> NoReturn:
     # Force the code to a signed int to avoid an 'int too large' error.
     # See https://bugs.python.org/issue28474
     code = _kernel32.GetLastError()
@@ -368,7 +375,7 @@
     )
 
 
-def _getfileinfo(name):
+def _getfileinfo(name: bytes) -> _BY_HANDLE_FILE_INFORMATION:
     fh = _kernel32.CreateFileA(
         name,
         0,
@@ -389,7 +396,7 @@
         _kernel32.CloseHandle(fh)
 
 
-def checkcertificatechain(cert, build=True):
+def checkcertificatechain(cert: bytes, build: bool = True) -> bool:
     """Tests the given certificate to see if there is a complete chain to a
     trusted root certificate.  As a side effect, missing certificates are
     downloaded and installed unless ``build=False``.  True is returned if a
@@ -439,7 +446,7 @@
         _crypt32.CertFreeCertificateContext(certctx)
 
 
-def oslink(src, dst):
+def oslink(src: bytes, dst: bytes) -> None:
     try:
         if not _kernel32.CreateHardLinkA(dst, src, None):
             _raiseoserror(src)
@@ -447,12 +454,12 @@
         _raiseoserror(src)
 
 
-def nlinks(name):
+def nlinks(name: bytes) -> int:
     '''return number of hardlinks for the given file'''
     return _getfileinfo(name).nNumberOfLinks
 
 
-def samefile(path1, path2):
+def samefile(path1: bytes, path2: bytes) -> bool:
     '''Returns whether path1 and path2 refer to the same file or directory.'''
     res1 = _getfileinfo(path1)
     res2 = _getfileinfo(path2)
@@ -463,14 +470,14 @@
     )
 
 
-def samedevice(path1, path2):
+def samedevice(path1: bytes, path2: bytes) -> bool:
     '''Returns whether path1 and path2 are on the same device.'''
     res1 = _getfileinfo(path1)
     res2 = _getfileinfo(path2)
     return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber
 
 
-def peekpipe(pipe):
+def peekpipe(pipe) -> int:
     handle = msvcrt.get_osfhandle(pipe.fileno())  # pytype: disable=module-attr
     avail = _DWORD()
 
@@ -485,14 +492,14 @@
     return avail.value
 
 
-def lasterrorwaspipeerror(err):
+def lasterrorwaspipeerror(err) -> bool:
     if err.errno != errno.EINVAL:
         return False
     err = _kernel32.GetLastError()
     return err == _ERROR_BROKEN_PIPE or err == _ERROR_NO_DATA
 
 
-def testpid(pid):
+def testpid(pid: int) -> bool:
     """return True if pid is still running or unable to
     determine, False otherwise"""
     h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
@@ -506,7 +513,7 @@
     return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
 
 
-def executablepath():
+def executablepath() -> bytes:
     '''return full path of hg.exe'''
     size = 600
     buf = ctypes.create_string_buffer(size + 1)
@@ -520,7 +527,7 @@
     return buf.value
 
 
-def getvolumename(path):
+def getvolumename(path: bytes) -> Optional[bytes]:
     """Get the mount point of the filesystem from a directory or file
     (best-effort)
 
@@ -541,7 +548,7 @@
     return buf.value
 
 
-def getfstype(path):
+def getfstype(path: bytes) -> Optional[bytes]:
     """Get the filesystem type name from a directory or file (best-effort)
 
     Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
@@ -572,7 +579,7 @@
     return name.value
 
 
-def getuser():
+def getuser() -> bytes:
     '''return name of current user'''
     size = _DWORD(300)
     buf = ctypes.create_string_buffer(size.value + 1)
@@ -581,10 +588,10 @@
     return buf.value
 
 
-_signalhandler = []
+_signalhandler: List[_SIGNAL_HANDLER] = []
 
 
-def setsignalhandler():
+def setsignalhandler() -> None:
     """Register a termination handler for console events including
     CTRL+C. python signal handlers do not work well with socket
     operations.
@@ -601,7 +608,7 @@
         raise ctypes.WinError()  # pytype: disable=module-attr
 
 
-def hidewindow():
+def hidewindow() -> None:
     def callback(hwnd, pid):
         wpid = _DWORD()
         _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
@@ -614,7 +621,7 @@
     _user32.EnumWindows(_WNDENUMPROC(callback), pid)
 
 
-def termsize():
+def termsize() -> Tuple[int, int]:
     # cmd.exe does not handle CR like a unix console, the CR is
     # counted in the line length. On 80 columns consoles, if 80
     # characters are written, the following CR won't apply on the
@@ -635,7 +642,7 @@
     return width, height
 
 
-def enablevtmode():
+def enablevtmode() -> bool:
     """Enable virtual terminal mode for the associated console.  Return True if
     enabled, else False."""
 
@@ -661,7 +668,7 @@
     return True
 
 
-def spawndetached(args):
+def spawndetached(args: List[bytes]) -> int:
     # No standard library function really spawns a fully detached
     # process under win32 because they allocate pipes or other objects
     # to handle standard streams communications. Passing these objects
@@ -703,7 +710,7 @@
     return pi.dwProcessId
 
 
-def unlink(f):
+def unlink(f: bytes) -> None:
     '''try to implement POSIX' unlink semantics on Windows'''
 
     if os.path.isdir(f):
@@ -758,7 +765,7 @@
             pass
 
 
-def makedir(path, notindexed):
+def makedir(path: bytes, notindexed: bool) -> None:
     os.mkdir(path)
     if notindexed:
         _kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)
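
The `NoReturn` annotations added in this file tell the type checker that a helper always raises, which in turn lets it narrow types after the call site. A tiny illustration with hypothetical names:

from typing import NoReturn, Optional


def _fail(name: bytes) -> NoReturn:
    # NoReturn declares that this helper never returns normally.
    raise OSError(0, "forced failure", name)


def read_or_fail(data: Optional[bytes], name: bytes) -> bytes:
    if data is None:
        _fail(name)
    # The checker narrows `data` to bytes here because _fail is NoReturn.
    return data
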
--- a/mercurial/windows.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/windows.py	Thu Mar 02 22:45:44 2023 +0100
@@ -14,8 +14,24 @@
 import stat
 import string
 import sys
+import typing
 import winreg  # pytype: disable=import-error
 
+from typing import (
+    AnyStr,
+    BinaryIO,
+    Iterable,
+    Iterator,
+    List,
+    Mapping,
+    NoReturn,
+    Optional,
+    Pattern,
+    Sequence,
+    Tuple,
+    Union,
+)
+
 from .i18n import _
 from .pycompat import getattr
 from . import (
@@ -23,6 +39,7 @@
     error,
     policy,
     pycompat,
+    typelib,
     win32,
 )
 
@@ -44,7 +61,19 @@
 testpid = win32.testpid
 unlink = win32.unlink
 
-umask = 0o022
+if typing.TYPE_CHECKING:
+    # Replace the various overloads that come along with aliasing stdlib methods
+    # with the narrow definition that we care about in the type checking phase
+    # only.  This ensures that both Windows and POSIX see only the definition
+    # that is actually available.
+    #
+    # Note that if we check pycompat.TYPE_CHECKING here, it is always False, and
+    # the methods aren't replaced.
+    def split(p: bytes) -> Tuple[bytes, bytes]:
+        raise NotImplementedError
+
+
+umask: int = 0o022
 
 
 class mixedfilemodewrapper:
@@ -178,15 +207,7 @@
 listdir = osutil.listdir
 
 
-# copied from .utils.procutil, remove after Python 2 support was dropped
-def _isatty(fp):
-    try:
-        return fp.isatty()
-    except AttributeError:
-        return False
-
-
-def get_password():
+def get_password() -> bytes:
     """Prompt for password with echo off, using Windows getch().
 
     This shouldn't be called directly- use ``ui.getpass()`` instead, which
@@ -208,7 +229,7 @@
     return encoding.unitolocal(pw)
 
 
-class winstdout:
+class winstdout(typelib.BinaryIO_Proxy):
     """Some files on Windows misbehave.
 
     When writing to a broken pipe, EINVAL instead of EPIPE may be raised.
@@ -217,7 +238,7 @@
     error may happen. Python 3 already works around that.
     """
 
-    def __init__(self, fp):
+    def __init__(self, fp: BinaryIO):
         self.fp = fp
 
     def __getattr__(self, key):
@@ -247,11 +268,11 @@
             raise IOError(errno.EPIPE, 'Broken pipe')
 
 
-def openhardlinks():
+def openhardlinks() -> bool:
     return True
 
 
-def parsepatchoutput(output_line):
+def parsepatchoutput(output_line: bytes) -> bytes:
     """parses the output produced by patch and returns the filename"""
     pf = output_line[14:]
     if pf[0] == b'`':
@@ -259,7 +280,9 @@
     return pf
 
 
-def sshargs(sshcmd, host, user, port):
+def sshargs(
+    sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes]
+) -> bytes:
     '''Build argument list for ssh or Plink'''
     pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p'
     args = user and (b"%s@%s" % (user, host)) or host
@@ -274,23 +297,28 @@
     return args
 
 
-def setflags(f, l, x):
-    pass
-
-
-def copymode(src, dst, mode=None, enforcewritable=False):
+def setflags(f: bytes, l: bool, x: bool) -> None:
     pass
 
 
-def checkexec(path):
+def copymode(
+    src: bytes,
+    dst: bytes,
+    mode: Optional[bytes] = None,
+    enforcewritable: bool = False,
+) -> None:
+    pass
+
+
+def checkexec(path: bytes) -> bool:
     return False
 
 
-def checklink(path):
+def checklink(path: bytes) -> bool:
     return False
 
 
-def setbinary(fd):
+def setbinary(fd) -> None:
     # When run without console, pipes may expose invalid
     # fileno(), usually set to -1.
     fno = getattr(fd, 'fileno', None)
@@ -298,27 +326,28 @@
         msvcrt.setmode(fno(), os.O_BINARY)  # pytype: disable=module-attr
 
 
-def pconvert(path):
+def pconvert(path: bytes) -> bytes:
     return path.replace(pycompat.ossep, b'/')
 
 
-def localpath(path):
+def localpath(path: bytes) -> bytes:
     return path.replace(b'/', b'\\')
 
 
-def normpath(path):
+def normpath(path: bytes) -> bytes:
     return pconvert(os.path.normpath(path))
 
 
-def normcase(path):
+def normcase(path: bytes) -> bytes:
     return encoding.upper(path)  # NTFS compares via upper()
 
 
-DRIVE_RE_B = re.compile(b'^[a-z]:')
-DRIVE_RE_S = re.compile('^[a-z]:')
+DRIVE_RE_B: Pattern[bytes] = re.compile(b'^[a-z]:')
+DRIVE_RE_S: Pattern[str] = re.compile('^[a-z]:')
 
 
-def abspath(path):
+# TODO: why is this accepting str?
+def abspath(path: AnyStr) -> AnyStr:
     abs_path = os.path.abspath(path)  # re-exports
     # Python on Windows is inconsistent regarding the capitalization of drive
     # letter and this cause issue with various path comparison along the way.
@@ -334,15 +363,15 @@
 
 
 # see posix.py for definitions
-normcasespec = encoding.normcasespecs.upper
+normcasespec: int = encoding.normcasespecs.upper
 normcasefallback = encoding.upperfallback
 
 
-def samestat(s1, s2):
+def samestat(s1: os.stat_result, s2: os.stat_result) -> bool:
     return False
 
 
-def shelltocmdexe(path, env):
+def shelltocmdexe(path: bytes, env: Mapping[bytes, bytes]) -> bytes:
     r"""Convert shell variables in the form $var and ${var} inside ``path``
     to %var% form.  Existing Windows style variables are left unchanged.
 
@@ -467,11 +496,11 @@
 # the number of backslashes that precede double quotes and add another
 # backslash before every double quote (being careful with the double
 # quote we've appended to the end)
-_quotere = None
+_quotere: Optional[Pattern[bytes]] = None
 _needsshellquote = None
 
 
-def shellquote(s):
+def shellquote(s: bytes) -> bytes:
     r"""
     >>> shellquote(br'C:\Users\xyz')
     '"C:\\Users\\xyz"'
@@ -501,24 +530,24 @@
     return b'"%s"' % _quotere.sub(br'\1\1\\\2', s)
 
 
-def _unquote(s):
+def _unquote(s: bytes) -> bytes:
     if s.startswith(b'"') and s.endswith(b'"'):
         return s[1:-1]
     return s
 
 
-def shellsplit(s):
+def shellsplit(s: bytes) -> List[bytes]:
     """Parse a command string in cmd.exe way (best-effort)"""
     return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
 
 
 # if you change this stub into a real check, please try to implement the
 # username and groupname functions above, too.
-def isowner(st):
+def isowner(st: os.stat_result) -> bool:
     return True
 
 
-def findexe(command):
+def findexe(command: bytes) -> Optional[bytes]:
     """Find executable for command searching like cmd.exe does.
     If command is a basename then PATH is searched for command.
     PATH isn't searched if command is an absolute or relative path.
@@ -529,7 +558,7 @@
     if os.path.splitext(command)[1].lower() in pathexts:
         pathexts = [b'']
 
-    def findexisting(pathcommand):
+    def findexisting(pathcommand: bytes) -> Optional[bytes]:
         """Will append extension (if needed) and return existing file"""
         for ext in pathexts:
             executable = pathcommand + ext
@@ -550,7 +579,7 @@
 _wantedkinds = {stat.S_IFREG, stat.S_IFLNK}
 
 
-def statfiles(files):
+def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]:
     """Stat each file in files. Yield each stat, or None if a file
     does not exist or has a type we don't care about.
 
@@ -576,7 +605,7 @@
         yield cache.get(base, None)
 
 
-def username(uid=None):
+def username(uid: Optional[int] = None) -> Optional[bytes]:
     """Return the name of the user with the given uid.
 
     If uid is None, return the name of the current user."""
@@ -591,14 +620,14 @@
     return None
 
 
-def groupname(gid=None):
+def groupname(gid: Optional[int] = None) -> Optional[bytes]:
     """Return the name of the group with the given gid.
 
     If gid is None, return the name of the current group."""
     return None
 
 
-def readlink(pathname):
+def readlink(pathname: bytes) -> bytes:
     path = pycompat.fsdecode(pathname)
     try:
         link = os.readlink(path)
@@ -611,7 +640,7 @@
     return pycompat.fsencode(link)
 
 
-def removedirs(name):
+def removedirs(name: bytes) -> None:
     """special version of os.removedirs that does not remove symlinked
     directories or junction points if they actually contain files"""
     if listdir(name):
@@ -630,7 +659,7 @@
         head, tail = os.path.split(head)
 
 
-def rename(src, dst):
+def rename(src: bytes, dst: bytes) -> None:
     '''atomically rename file src to dst, replacing dst if it exists'''
     try:
         os.rename(src, dst)
@@ -639,28 +668,32 @@
         os.rename(src, dst)
 
 
-def gethgcmd():
+def gethgcmd() -> List[bytes]:
     return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]]
 
 
-def groupmembers(name):
+def groupmembers(name: bytes) -> List[bytes]:
     # Don't support groups on Windows for now
     raise KeyError
 
 
-def isexec(f):
+def isexec(f: bytes) -> bool:
     return False
 
 
 class cachestat:
-    def __init__(self, path):
+    def __init__(self, path: bytes) -> None:
         pass
 
-    def cacheable(self):
+    def cacheable(self) -> bool:
         return False
 
 
-def lookupreg(key, valname=None, scope=None):
+def lookupreg(
+    key: bytes,
+    valname: Optional[bytes] = None,
+    scope: Optional[Union[int, Iterable[int]]] = None,
+) -> Optional[bytes]:
     """Look up a key/value name in the Windows registry.
 
     valname: value name. If unspecified, the default value for the key
@@ -693,25 +726,25 @@
             pass
 
 
-expandglobs = True
+expandglobs: bool = True
 
 
-def statislink(st):
+def statislink(st: Optional[os.stat_result]) -> bool:
     '''check whether a stat result is a symlink'''
     return False
 
 
-def statisexec(st):
+def statisexec(st: Optional[os.stat_result]) -> bool:
     '''check whether a stat result is an executable file'''
     return False
 
 
-def poll(fds):
+def poll(fds) -> List:
     # see posix.py for description
     raise NotImplementedError()
 
 
-def readpipe(pipe):
+def readpipe(pipe) -> bytes:
     """Read all available data from a pipe."""
     chunks = []
     while True:
@@ -727,5 +760,5 @@
     return b''.join(chunks)
 
 
-def bindunixsocket(sock, path):
+def bindunixsocket(sock, path: bytes) -> NoReturn:
     raise NotImplementedError('unsupported platform')
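
The `typing.TYPE_CHECKING` override near the top of windows.py is an instance of a general trick: at type-check time, shadow an aliased stdlib function whose many overloads confuse the checker with the one narrow signature the module actually relies on. A self-contained sketch, assuming `os.path.split` is the function being narrowed:

import os.path
import typing
from typing import Tuple

if typing.TYPE_CHECKING:
    # Visible to the type checker only: bytes in, (head, tail) bytes out.
    def split(p: bytes) -> Tuple[bytes, bytes]:
        raise NotImplementedError

else:
    # At runtime, the real (overloaded) stdlib function is used unchanged.
    split = os.path.split
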
--- a/mercurial/worker.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/mercurial/worker.py	Thu Mar 02 22:45:44 2023 +0100
@@ -61,45 +61,6 @@
     return threading.current_thread() == threading.main_thread()
 
 
-class _blockingreader:
-    """Wrap unbuffered stream such that pickle.load() works with it.
-
-    pickle.load() expects that calls to read() and readinto() read as many
-    bytes as requested. On EOF, it is fine to read fewer bytes. In this case,
-    pickle.load() raises an EOFError.
-    """
-
-    def __init__(self, wrapped):
-        self._wrapped = wrapped
-
-    def readline(self):
-        return self._wrapped.readline()
-
-    def readinto(self, buf):
-        pos = 0
-        size = len(buf)
-
-        with memoryview(buf) as view:
-            while pos < size:
-                with view[pos:] as subview:
-                    ret = self._wrapped.readinto(subview)
-                if not ret:
-                    break
-                pos += ret
-
-        return pos
-
-    # issue multiple reads until size is fulfilled (or EOF is encountered)
-    def read(self, size=-1):
-        if size < 0:
-            return self._wrapped.readall()
-
-        buf = bytearray(size)
-        n_read = self.readinto(buf)
-        del buf[n_read:]
-        return bytes(buf)
-
-
 if pycompat.isposix or pycompat.iswindows:
     _STARTUP_COST = 0.01
     # The Windows worker is thread based. If tasks are CPU bound, threads
@@ -276,11 +237,26 @@
     selector = selectors.DefaultSelector()
     for rfd, wfd in pipes:
         os.close(wfd)
-        # The stream has to be unbuffered. Otherwise, if all data is read from
-        # the raw file into the buffer, the selector thinks that the FD is not
-        # ready to read while pickle.load() could read from the buffer. This
-        # would delay the processing of readable items.
-        selector.register(os.fdopen(rfd, 'rb', 0), selectors.EVENT_READ)
+        # Buffering is needed for performance, but it also presents a problem:
+        # the selector doesn't take buffered data into account, so we have to
+        # ensure the buffers are empty whenever select is called
+        # (see peek_nonblock below).
+        selector.register(os.fdopen(rfd, 'rb', 4096), selectors.EVENT_READ)
+
+    def peek_nonblock(f):
+        os.set_blocking(f.fileno(), False)
+        res = f.peek()
+        os.set_blocking(f.fileno(), True)
+        return res
+
+    def load_all(f):
+        # The pytype error likely goes away on a modern version of
+        # pytype having a modern typeshed snapshot.
+        # pytype: disable=wrong-arg-types
+        yield pickle.load(f)
+        while len(peek_nonblock(f)) > 0:
+            yield pickle.load(f)
+        # pytype: enable=wrong-arg-types
 
     def cleanup():
         signal.signal(signal.SIGINT, oldhandler)
@@ -294,15 +270,11 @@
         while openpipes > 0:
             for key, events in selector.select():
                 try:
-                    # The pytype error likely goes away on a modern version of
-                    # pytype having a modern typeshed snapshot.
-                    # pytype: disable=wrong-arg-types
-                    res = pickle.load(_blockingreader(key.fileobj))
-                    # pytype: enable=wrong-arg-types
-                    if hasretval and res[0]:
-                        retval.update(res[1])
-                    else:
-                        yield res
+                    for res in load_all(key.fileobj):
+                        if hasretval and res[0]:
+                            retval.update(res[1])
+                        else:
+                            yield res
                 except EOFError:
                     selector.unregister(key.fileobj)
                     # pytype: disable=attribute-error
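
The pairing of `peek_nonblock()` and `load_all()` above generalizes to any buffered pipe carrying pickled items: drain every complete item before returning to `select()`, since the selector cannot see bytes sitting in the userspace buffer. A hedged, standalone sketch, assuming `fileobj` is a buffered binary reader that select just reported readable:

import os
import pickle


def drain_ready(fileobj):
    """Yield every pickled item currently available on fileobj."""
    yield pickle.load(fileobj)  # at least one item: select said readable
    while True:
        # Peek with the fd temporarily non-blocking so an empty pipe
        # returns immediately instead of stalling the event loop.
        os.set_blocking(fileobj.fileno(), False)
        try:
            pending = fileobj.peek()
        finally:
            os.set_blocking(fileobj.fileno(), True)
        if not pending:
            break
        yield pickle.load(fileobj)
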
--- a/relnotes/next	Thu Mar 02 15:21:36 2023 +0100
+++ b/relnotes/next	Thu Mar 02 22:45:44 2023 +0100
@@ -2,6 +2,9 @@
 
 == New Features ==
 
+ * There is a new internal merge tool called `internal:union-other-first`.
+   It works like `internal:union` but adds the other side on top of the
+   local one.
+
 == Default Format Change ==
 
 These changes affect newly created repositories (or new clones) done with
@@ -16,3 +19,7 @@
 == Internal API Changes ==
 
 == Miscellaneous ==
+
+ * pullbundle support no longer requires setting a server-side option;
+   providing a .hg/pullbundles.manifest file that follows the syntax
+   described in 'hg help -e clonebundles' is enough.
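
For illustration only (the URL and bundle spec below are placeholders; see 'hg help -e clonebundles' for the authoritative syntax), a minimal .hg/pullbundles.manifest entry might look like:

https://example.com/bundles/full.hg BUNDLESPEC=gzip-v2

Likewise, the new merge tool can be requested for a single merge with `hg merge --tool internal:union-other-first`.
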
--- a/rust/Cargo.lock	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/Cargo.lock	Thu Mar 02 22:45:44 2023 +0100
@@ -10,21 +10,26 @@
 
 [[package]]
 name = "adler"
-version = "0.2.3"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
 [[package]]
 name = "ahash"
-version = "0.4.7"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e"
+checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "version_check",
+]
 
 [[package]]
 name = "aho-corasick"
-version = "0.7.18"
+version = "0.7.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
 dependencies = [
  "memchr",
 ]
@@ -36,12 +41,12 @@
 checksum = "250f629c0161ad8107cf89319e990051fae62832fd343083bea452d93e2205fd"
 
 [[package]]
-name = "ansi_term"
-version = "0.12.1"
+name = "android_system_properties"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
 dependencies = [
- "winapi",
+ "libc",
 ]
 
 [[package]]
@@ -57,9 +62,9 @@
 
 [[package]]
 name = "autocfg"
-version = "1.0.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
 [[package]]
 name = "bitflags"
@@ -87,14 +92,20 @@
 
 [[package]]
 name = "block-buffer"
-version = "0.10.2"
+version = "0.10.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
+checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e"
 dependencies = [
  "generic-array",
 ]
 
 [[package]]
+name = "bumpalo"
+version = "3.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba"
+
+[[package]]
 name = "byteorder"
 version = "1.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -102,18 +113,18 @@
 
 [[package]]
 name = "bytes-cast"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d434f9a4ecbe987e7ccfda7274b6f82ea52c9b63742565a65cb5e8ba0f2c452"
+checksum = "a20de93b91d7703ca0e39e12930e310acec5ff4d715f4166e0ab026babb352e8"
 dependencies = [
  "bytes-cast-derive",
 ]
 
 [[package]]
 name = "bytes-cast-derive"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb936af9de38476664d6b58e529aff30d482e4ce1c5e150293d00730b0d81fdb"
+checksum = "7470a6fcce58cde3d62cce758bf71007978b75247e6becd9255c9b884bcb4f71"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -122,58 +133,80 @@
 
 [[package]]
 name = "cc"
-version = "1.0.66"
+version = "1.0.76"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
+checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f"
 dependencies = [
  "jobserver",
 ]
 
 [[package]]
 name = "cfg-if"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-
-[[package]]
-name = "cfg-if"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
 [[package]]
 name = "chrono"
-version = "0.4.19"
+version = "0.4.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
+checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
 dependencies = [
- "libc",
+ "iana-time-zone",
+ "js-sys",
  "num-integer",
  "num-traits",
  "time",
+ "wasm-bindgen",
  "winapi",
 ]
 
 [[package]]
 name = "clap"
-version = "2.34.0"
+version = "4.0.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+checksum = "60494cedb60cb47462c0ff7be53de32c0e42a6fc2c772184554fa12bd9489c03"
 dependencies = [
- "ansi_term",
  "atty",
  "bitflags",
+ "clap_derive",
+ "clap_lex",
+ "once_cell",
  "strsim",
- "textwrap",
- "unicode-width",
- "vec_map",
+ "termcolor",
 ]
 
 [[package]]
-name = "const_fn"
-version = "0.4.4"
+name = "clap_derive"
+version = "4.0.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
+checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "codespan-reporting"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e"
+dependencies = [
+ "termcolor",
+ "unicode-width",
+]
 
 [[package]]
 name = "convert_case"
@@ -182,28 +215,25 @@
 checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
 
 [[package]]
-name = "cpufeatures"
-version = "0.1.4"
+name = "core-foundation-sys"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8"
-dependencies = [
- "libc",
-]
+checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc"
 
 [[package]]
 name = "cpufeatures"
-version = "0.2.1"
+version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469"
+checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320"
 dependencies = [
  "libc",
 ]
 
 [[package]]
 name = "cpython"
-version = "0.7.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7d46ba8ace7f3a1d204ac5060a706d0a68de6b42eafb6a586cc08bebcffe664"
+checksum = "3052106c29da7390237bc2310c1928335733b286287754ea85e6093d2495280e"
 dependencies = [
  "libc",
  "num-traits",
@@ -213,20 +243,20 @@
 
 [[package]]
 name = "crc32fast"
-version = "1.2.1"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
 ]
 
 [[package]]
 name = "crossbeam-channel"
-version = "0.5.2"
+version = "0.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa"
+checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "crossbeam-utils",
 ]
 
@@ -236,51 +266,93 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "crossbeam-epoch",
  "crossbeam-utils",
 ]
 
 [[package]]
 name = "crossbeam-epoch"
-version = "0.9.1"
+version = "0.9.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d"
+checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348"
 dependencies = [
- "cfg-if 1.0.0",
- "const_fn",
+ "autocfg",
+ "cfg-if",
  "crossbeam-utils",
- "lazy_static",
  "memoffset",
  "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.1"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d"
+checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac"
 dependencies = [
- "autocfg",
- "cfg-if 1.0.0",
- "lazy_static",
+ "cfg-if",
 ]
 
 [[package]]
 name = "crypto-common"
-version = "0.1.2"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4600d695eb3f6ce1cd44e6e291adceb2cc3ab12f20a33777ecd0bf6eba34e06"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
 dependencies = [
  "generic-array",
+ "typenum",
 ]
 
 [[package]]
 name = "ctor"
-version = "0.1.16"
+version = "0.1.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
+dependencies = [
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "cxx"
+version = "1.0.81"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97abf9f0eca9e52b7f81b945524e76710e6cb2366aead23b7d4fbf72e281f888"
+dependencies = [
+ "cc",
+ "cxxbridge-flags",
+ "cxxbridge-macro",
+ "link-cplusplus",
+]
+
+[[package]]
+name = "cxx-build"
+version = "1.0.81"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
+checksum = "7cc32cc5fea1d894b77d269ddb9f192110069a8a9c1f1d441195fba90553dea3"
 dependencies = [
+ "cc",
+ "codespan-reporting",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "scratch",
+ "syn",
+]
+
+[[package]]
+name = "cxxbridge-flags"
+version = "1.0.81"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ca220e4794c934dc6b1207c3b42856ad4c302f2df1712e9f8d2eec5afaacf1f"
+
+[[package]]
+name = "cxxbridge-macro"
+version = "1.0.81"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704"
+dependencies = [
+ "proc-macro2",
  "quote",
  "syn",
 ]
@@ -300,9 +372,9 @@
 
 [[package]]
 name = "diff"
-version = "0.1.12"
+version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499"
+checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
 
 [[package]]
 name = "digest"
@@ -315,25 +387,25 @@
 
 [[package]]
 name = "digest"
-version = "0.10.2"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cb780dce4f9a8f5c087362b3a4595936b2019e7c8b30f2c3e9a7e94e6ae9837"
+checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
 dependencies = [
- "block-buffer 0.10.2",
+ "block-buffer 0.10.3",
  "crypto-common",
 ]
 
 [[package]]
 name = "either"
-version = "1.6.1"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
 
 [[package]]
 name = "env_logger"
-version = "0.9.0"
+version = "0.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3"
+checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
 dependencies = [
  "atty",
  "humantime",
@@ -344,22 +416,20 @@
 
 [[package]]
 name = "fastrand"
-version = "1.7.0"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf"
+checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
 dependencies = [
  "instant",
 ]
 
 [[package]]
 name = "flate2"
-version = "1.0.22"
+version = "1.0.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f"
+checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6"
 dependencies = [
- "cfg-if 1.0.0",
  "crc32fast",
- "libc",
  "libz-sys",
  "miniz_oxide",
 ]
@@ -386,9 +456,9 @@
 
 [[package]]
 name = "generic-array"
-version = "0.14.4"
+version = "0.14.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
+checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
 dependencies = [
  "typenum",
  "version_check",
@@ -396,47 +466,47 @@
 
 [[package]]
 name = "getrandom"
-version = "0.1.15"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
+checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
 dependencies = [
- "cfg-if 0.1.10",
+ "cfg-if",
  "libc",
  "wasi 0.9.0+wasi-snapshot-preview1",
 ]
 
 [[package]]
 name = "getrandom"
-version = "0.2.4"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "libc",
- "wasi 0.10.0+wasi-snapshot-preview1",
+ "wasi 0.11.0+wasi-snapshot-preview1",
 ]
 
 [[package]]
-name = "glob"
-version = "0.3.0"
+name = "hashbrown"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
-
-[[package]]
-name = "hashbrown"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038"
 dependencies = [
  "ahash",
  "rayon",
 ]
 
 [[package]]
+name = "heck"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+
+[[package]]
 name = "hermit-abi"
-version = "0.1.17"
+version = "0.1.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
 dependencies = [
  "libc",
 ]
@@ -462,12 +532,12 @@
  "hashbrown",
  "home",
  "im-rc",
- "itertools 0.10.3",
+ "itertools",
  "lazy_static",
  "libc",
  "log",
+ "logging_timer",
  "memmap2",
- "micro-timer",
  "once_cell",
  "ouroboros",
  "pretty_assertions",
@@ -500,9 +570,9 @@
 
 [[package]]
 name = "home"
-version = "0.5.3"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654"
+checksum = "747309b4b440c06d57b0b25f2aee03ee9b5e5397d288c60e21fc709bb98a7408"
 dependencies = [
  "winapi",
 ]
@@ -514,13 +584,37 @@
 checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
 
 [[package]]
+name = "iana-time-zone"
+version = "0.1.53"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "wasm-bindgen",
+ "winapi",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca"
+dependencies = [
+ "cxx",
+ "cxx-build",
+]
+
+[[package]]
 name = "im-rc"
-version = "15.0.0"
+version = "15.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
+checksum = "af1955a75fa080c677d3972822ec4bad316169ab1cfc6c257a942c2265dbe5fe"
 dependencies = [
  "bitmaps",
- "rand_core 0.5.1",
+ "rand_core 0.6.4",
  "rand_xoshiro",
  "sized-chunks",
  "typenum",
@@ -533,37 +627,37 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
 ]
 
 [[package]]
 name = "itertools"
-version = "0.9.0"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
-dependencies = [
- "either",
-]
-
-[[package]]
-name = "itertools"
-version = "0.10.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
 dependencies = [
  "either",
 ]
 
 [[package]]
 name = "jobserver"
-version = "0.1.21"
+version = "0.1.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
+checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b"
 dependencies = [
  "libc",
 ]
 
 [[package]]
+name = "js-sys"
+version = "0.3.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
 name = "lazy_static"
 version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -571,21 +665,21 @@
 
 [[package]]
 name = "libc"
-version = "0.2.124"
+version = "0.2.137"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21a41fed9d98f27ab1c6d161da622a4fa35e8a54a8adc24bbf3ddd0ef70b0e50"
+checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
 
 [[package]]
 name = "libm"
-version = "0.2.1"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a"
+checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb"
 
 [[package]]
 name = "libz-sys"
-version = "1.1.2"
+version = "1.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
+checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf"
 dependencies = [
  "cc",
  "pkg-config",
@@ -593,25 +687,56 @@
 ]
 
 [[package]]
+name = "link-cplusplus"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369"
+dependencies = [
+ "cc",
+]
+
+[[package]]
 name = "log"
-version = "0.4.14"
+version = "0.4.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "logging_timer"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "64e96f261d684b7089aa576bb74e823241dccd994b27d30fabf1dcb3af284fe9"
 dependencies = [
- "cfg-if 1.0.0",
+ "log",
+ "logging_timer_proc_macros",
+]
+
+[[package]]
+name = "logging_timer_proc_macros"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10a9062912d7952c5588cc474795e0b9ee008e7e6781127945b85413d4b99d81"
+dependencies = [
+ "log",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
 name = "memchr"
-version = "2.4.1"
+version = "2.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
 
 [[package]]
 name = "memmap2"
-version = "0.5.7"
+version = "0.5.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95af15f345b17af2efc8ead6080fb8bc376f8cec1b35277b935637595fe77498"
+checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc"
 dependencies = [
  "libc",
  "stable_deref_trait",
@@ -619,50 +744,27 @@
 
 [[package]]
 name = "memoffset"
-version = "0.6.1"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
 dependencies = [
  "autocfg",
 ]
 
 [[package]]
-name = "micro-timer"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de32cb59a062672560d6f0842c4aa7714727457b9fe2daf8987d995a176a405"
-dependencies = [
- "micro-timer-macros",
- "scopeguard",
-]
-
-[[package]]
-name = "micro-timer-macros"
-version = "0.4.0"
+name = "miniz_oxide"
+version = "0.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cee948b94700125b52dfb68dd17c19f6326696c1df57f92c05ee857463c93ba1"
-dependencies = [
- "proc-macro2",
- "quote",
- "scopeguard",
- "syn",
-]
-
-[[package]]
-name = "miniz_oxide"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
+checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34"
 dependencies = [
  "adler",
- "autocfg",
 ]
 
 [[package]]
 name = "num-integer"
-version = "0.1.44"
+version = "0.1.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
+checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
 dependencies = [
  "autocfg",
  "num-traits",
@@ -670,9 +772,9 @@
 
 [[package]]
 name = "num-traits"
-version = "0.2.14"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
+checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
 dependencies = [
  "autocfg",
  "libm",
@@ -680,9 +782,9 @@
 
 [[package]]
 name = "num_cpus"
-version = "1.13.0"
+version = "1.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
+checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
 dependencies = [
  "hermit-abi",
  "libc",
@@ -690,9 +792,9 @@
 
 [[package]]
 name = "once_cell"
-version = "1.14.0"
+version = "1.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f7254b99e31cad77da24b08ebf628882739a608578bb1bcdfc1f9c21260d7c0"
+checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860"
 
 [[package]]
 name = "opaque-debug"
@@ -701,21 +803,26 @@
 checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
 
 [[package]]
+name = "os_str_bytes"
+version = "6.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b5bf27447411e9ee3ff51186bf7a08e16c341efdde93f4d823e8844429bed7e"
+
+[[package]]
 name = "ouroboros"
-version = "0.15.0"
+version = "0.15.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f31a3b678685b150cba82b702dcdc5e155893f63610cf388d30cd988d4ca2bf"
+checksum = "dfbb50b356159620db6ac971c6d5c9ab788c9cc38a6f49619fca2a27acb062ca"
 dependencies = [
  "aliasable",
  "ouroboros_macro",
- "stable_deref_trait",
 ]
 
 [[package]]
 name = "ouroboros_macro"
-version = "0.15.0"
+version = "0.15.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "084fd65d5dd8b3772edccb5ffd1e4b7eba43897ecd0f9401e330e8c542959408"
+checksum = "4a0d9d1a6191c4f391f87219d1ea42b23f09ee84d64763cd05ee6ea88d9f384d"
 dependencies = [
  "Inflector",
  "proc-macro-error",
@@ -726,41 +833,41 @@
 
 [[package]]
 name = "output_vt100"
-version = "0.1.2"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9"
+checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66"
 dependencies = [
  "winapi",
 ]
 
 [[package]]
 name = "paste"
-version = "1.0.5"
+version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
+checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1"
 
 [[package]]
 name = "pkg-config"
-version = "0.3.19"
+version = "0.3.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
+checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160"
 
 [[package]]
 name = "ppv-lite86"
-version = "0.2.10"
+version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
 
 [[package]]
 name = "pretty_assertions"
-version = "1.1.0"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76d5b548b725018ab5496482b45cb8bef21e9fed1858a6d674e3a8a0f0bb5d50"
+checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755"
 dependencies = [
- "ansi_term",
  "ctor",
  "diff",
  "output_vt100",
+ "yansi",
 ]
 
 [[package]]
@@ -789,18 +896,18 @@
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.24"
+version = "1.0.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
+checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
 dependencies = [
- "unicode-xid",
+ "unicode-ident",
 ]
 
 [[package]]
 name = "python3-sys"
-version = "0.7.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b18b32e64c103d5045f44644d7ddddd65336f7a0521f6fde673240a9ecceb77e"
+checksum = "49f8b50d72fb3015735aa403eebf19bbd72c093bfeeae24ee798be5f2f1aab52"
 dependencies = [
  "libc",
  "regex",
@@ -808,9 +915,9 @@
 
 [[package]]
 name = "quote"
-version = "1.0.7"
+version = "1.0.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
+checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
 dependencies = [
  "proc-macro2",
 ]
@@ -821,7 +928,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 dependencies = [
- "getrandom 0.1.15",
+ "getrandom 0.1.16",
  "libc",
  "rand_chacha 0.2.2",
  "rand_core 0.5.1",
@@ -836,7 +943,7 @@
 dependencies = [
  "libc",
  "rand_chacha 0.3.1",
- "rand_core 0.6.3",
+ "rand_core 0.6.4",
 ]
 
 [[package]]
@@ -856,7 +963,7 @@
 checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
 dependencies = [
  "ppv-lite86",
- "rand_core 0.6.3",
+ "rand_core 0.6.4",
 ]
 
 [[package]]
@@ -865,16 +972,16 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
 dependencies = [
- "getrandom 0.1.15",
+ "getrandom 0.1.16",
 ]
 
 [[package]]
 name = "rand_core"
-version = "0.6.3"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
 dependencies = [
- "getrandom 0.2.4",
+ "getrandom 0.2.8",
 ]
 
 [[package]]
@@ -902,16 +1009,16 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
 dependencies = [
- "rand_core 0.6.3",
+ "rand_core 0.6.4",
 ]
 
 [[package]]
 name = "rand_xoshiro"
-version = "0.4.0"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
+checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa"
 dependencies = [
- "rand_core 0.5.1",
+ "rand_core 0.6.4",
 ]
 
 [[package]]
@@ -938,18 +1045,18 @@
 
 [[package]]
 name = "redox_syscall"
-version = "0.2.11"
+version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8380fe0152551244f0747b1bf41737e0f8a74f97a14ccefd1148187271634f3c"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
 dependencies = [
  "bitflags",
 ]
 
 [[package]]
 name = "regex"
-version = "1.5.5"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
+checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -958,9 +1065,9 @@
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.25"
+version = "0.6.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
 
 [[package]]
 name = "remove_dir_all"
@@ -985,7 +1092,7 @@
  "home",
  "lazy_static",
  "log",
- "micro-timer",
+ "logging_timer",
  "rayon",
  "regex",
  "users",
@@ -1017,20 +1124,26 @@
 checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
+name = "scratch"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898"
+
+[[package]]
 name = "semver"
-version = "1.0.6"
+version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d"
+checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4"
 
 [[package]]
 name = "sha-1"
-version = "0.9.6"
+version = "0.9.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16"
+checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6"
 dependencies = [
  "block-buffer 0.9.0",
- "cfg-if 1.0.0",
- "cpufeatures 0.1.4",
+ "cfg-if",
+ "cpufeatures",
  "digest 0.9.0",
  "opaque-debug",
 ]
@@ -1041,16 +1154,16 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
 dependencies = [
- "cfg-if 1.0.0",
- "cpufeatures 0.2.1",
- "digest 0.10.2",
+ "cfg-if",
+ "cpufeatures",
+ "digest 0.10.5",
 ]
 
 [[package]]
 name = "sized-chunks"
-version = "0.6.2"
+version = "0.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ec31ceca5644fa6d444cc77548b88b67f46db6f7c71683b0f9336e671830d2f"
+checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e"
 dependencies = [
  "bitmaps",
  "typenum",
@@ -1070,19 +1183,19 @@
 
 [[package]]
 name = "strsim"
-version = "0.8.0"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
 
 [[package]]
 name = "syn"
-version = "1.0.54"
+version = "1.0.103"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
+checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d"
 dependencies = [
  "proc-macro2",
  "quote",
- "unicode-xid",
+ "unicode-ident",
 ]
 
 [[package]]
@@ -1091,7 +1204,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "fastrand",
  "libc",
  "redox_syscall",
@@ -1101,23 +1214,14 @@
 
 [[package]]
 name = "termcolor"
-version = "1.1.2"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
+checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
 dependencies = [
  "winapi-util",
 ]
 
 [[package]]
-name = "textwrap"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-dependencies = [
- "unicode-width",
-]
-
-[[package]]
 name = "thread_local"
 version = "1.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1139,32 +1243,32 @@
 
 [[package]]
 name = "twox-hash"
-version = "1.6.2"
+version = "1.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0"
+checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
 dependencies = [
- "cfg-if 1.0.0",
+ "cfg-if",
  "rand 0.8.5",
  "static_assertions",
 ]
 
 [[package]]
 name = "typenum"
-version = "1.12.0"
+version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
+checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
 
 [[package]]
 name = "unicode-width"
-version = "0.1.9"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
 
 [[package]]
 name = "users"
@@ -1178,9 +1282,9 @@
 
 [[package]]
 name = "vcpkg"
-version = "0.2.11"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
 
 [[package]]
 name = "vcsgraph"
@@ -1190,20 +1294,14 @@
 dependencies = [
  "hex",
  "rand 0.7.3",
- "sha-1 0.9.6",
+ "sha-1 0.9.8",
 ]
 
 [[package]]
-name = "vec_map"
-version = "0.8.2"
+name = "version_check"
+version = "0.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-
-[[package]]
-name = "version_check"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
 [[package]]
 name = "wasi"
@@ -1218,14 +1316,74 @@
 checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
 
 [[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.83"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
+
+[[package]]
 name = "which"
-version = "4.2.5"
+version = "4.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae"
+checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b"
 dependencies = [
  "either",
- "lazy_static",
  "libc",
+ "once_cell",
 ]
 
 [[package]]
@@ -1260,19 +1418,25 @@
 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
+name = "yansi"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
+
+[[package]]
 name = "zstd"
-version = "0.5.4+zstd.1.4.7"
+version = "0.11.2+zstd.1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910"
+checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
 dependencies = [
  "zstd-safe",
 ]
 
 [[package]]
 name = "zstd-safe"
-version = "2.0.6+zstd.1.4.7"
+version = "5.0.2+zstd.1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e"
+checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db"
 dependencies = [
  "libc",
  "zstd-sys",
@@ -1280,12 +1444,10 @@
 
 [[package]]
 name = "zstd-sys"
-version = "1.4.18+zstd.1.4.7"
+version = "2.0.1+zstd.1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81"
+checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b"
 dependencies = [
  "cc",
- "glob",
- "itertools 0.9.0",
  "libc",
 ]
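
Note on the lockfile churn above: besides routine version bumps, ``micro-timer``
and its macro crate are dropped in favor of ``logging_timer`` (see the ``rhg``
dependency list further down). A minimal sketch of the call-site pattern this
implies, assuming ``logging_timer``'s ``#[time]`` attribute macro and a made-up
``expensive_scan`` function::

    use logging_timer::time;

    // The attribute emits start/finish records with the elapsed time through
    // the standard `log` facade, much like micro-timer's #[timed] used to.
    #[time]
    fn expensive_scan(n: u64) -> u64 {
        (0..n).sum()
    }

    fn main() {
        // Without a logger installed, the timing records are simply dropped.
        println!("{}", expensive_scan(1_000_000));
    }
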
--- a/rust/README.rst	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/README.rst	Thu Mar 02 22:45:44 2023 +0100
@@ -77,8 +77,8 @@
 Developing Rust
 ===============
 
-The current version of Rust in use is ``1.48.0``, because it's what Debian
-stable has. You can use ``rustup override set 1.48.0`` at the root of the repo
+The current version of Rust in use is ``1.61.0``, because it's what Debian
+testing has. You can use ``rustup override set 1.61.0`` at the root of the repo
 to make it easier on you.
 
 Go to the ``hg-cpython`` folder::
--- a/rust/hg-core/Cargo.toml	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/Cargo.toml	Thu Mar 02 22:45:44 2023 +0100
@@ -3,50 +3,50 @@
 version = "0.1.0"
 authors = ["Georges Racinet <gracinet@anybox.fr>"]
 description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
-edition = "2018"
+edition = "2021"
 
 [lib]
 name = "hg"
 
 [dependencies]
 bitflags = "1.3.2"
-bytes-cast = "0.2.0"
+bytes-cast = "0.3.0"
 byteorder = "1.4.3"
 derive_more = "0.99.17"
-hashbrown = { version = "0.9.1", features = ["rayon"] }
-home = "0.5.3"
-im-rc = "15.0"
-itertools = "0.10.3"
+hashbrown = { version = "0.13.1", features = ["rayon"] }
+home = "0.5.4"
+im-rc = "15.1.0"
+itertools = "0.10.5"
 lazy_static = "1.4.0"
-libc = "0.2"
-ouroboros = "0.15.0"
-rand = "0.8.4"
+libc = "0.2.137"
+logging_timer = "1.1.0"
+ouroboros = "0.15.5"
+rand = "0.8.5"
 rand_pcg = "0.3.1"
 rand_distr = "0.4.3"
 rayon = "1.6.1"
-regex = "1.5.5"
+regex = "1.7.0"
 sha-1 = "0.10.0"
-twox-hash = "1.6.2"
+twox-hash = "1.6.3"
 same-file = "1.0.6"
-tempfile = "3.1.0"
+tempfile = "3.3.0"
 thread_local = "1.1.4"
-crossbeam-channel = "0.5.0"
-micro-timer = "0.4.0"
-log = "0.4.8"
-memmap2 = { version = "0.5.3", features = ["stable_deref_trait"] }
-zstd = "0.5.3"
+crossbeam-channel = "0.5.6"
+log = "0.4.17"
+memmap2 = { version = "0.5.8", features = ["stable_deref_trait"] }
+zstd = "0.11.2"
 format-bytes = "0.3.0"
 # once_cell 1.15 uses edition 2021, while the heptapod CI
 # uses an old version of Cargo that doesn't support it.
-once_cell = "1.14.0"
+once_cell = "1.16.0"
 
 # We don't use the `miniz-oxide` backend, so as not to skew rhg benchmarks,
 # until we have a clearer view of which backend is the fastest.
 [dependencies.flate2]
-version = "1.0.22"
+version = "1.0.24"
 features = ["zlib"]
 default-features = false
 
 [dev-dependencies]
-clap = "2.34.0"
+clap = { version = "4.0.24", features = ["derive"] }
 pretty_assertions = "1.1.0"
--- a/rust/hg-core/examples/nodemap/main.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/examples/nodemap/main.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -3,7 +3,6 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use clap::*;
 use hg::revlog::node::*;
 use hg::revlog::nodemap::*;
 use hg::revlog::*;
@@ -13,7 +12,6 @@
 use std::io;
 use std::io::Write;
 use std::path::{Path, PathBuf};
-use std::str::FromStr;
 use std::time::Instant;
 
 mod index;
@@ -42,7 +40,7 @@
         nm.insert(index, index.node(rev).unwrap(), rev).unwrap();
     }
     eprintln!("Nodemap constructed in RAM in {:?}", start.elapsed());
-    file.write(&nm.into_readonly_and_added_bytes().1)?;
+    file.write_all(&nm.into_readonly_and_added_bytes().1)?;
     eprintln!("Nodemap written to disk");
     Ok(())
 }
@@ -57,12 +55,7 @@
     let len = index.len() as u32;
     let mut rng = rand::thread_rng();
     let nodes: Vec<Node> = (0..queries)
-        .map(|_| {
-            index
-                .node((rng.gen::<u32>() % len) as Revision)
-                .unwrap()
-                .clone()
-        })
+        .map(|_| *index.node((rng.gen::<u32>() % len) as Revision).unwrap())
         .collect();
     if queries < 10 {
         let nodes_hex: Vec<String> =
@@ -86,61 +79,66 @@
 }
 
 fn main() {
-    let matches = App::new("Nodemap pure Rust example")
-        .arg(
-            Arg::with_name("REPOSITORY")
-                .help("Path to the repository, always necessary for its index")
-                .required(true),
-        )
-        .arg(
-            Arg::with_name("NODEMAP_FILE")
-                .help("Path to the nodemap file, independent of REPOSITORY")
-                .required(true),
-        )
-        .subcommand(
-            SubCommand::with_name("create")
-                .about("Create NODEMAP_FILE by scanning repository index"),
-        )
-        .subcommand(
-            SubCommand::with_name("query")
-                .about("Query NODEMAP_FILE for PREFIX")
-                .arg(Arg::with_name("PREFIX").required(true)),
-        )
-        .subcommand(
-            SubCommand::with_name("bench")
-                .about(
-                    "Perform #QUERIES random successful queries on NODEMAP_FILE")
-                .arg(Arg::with_name("QUERIES").required(true)),
-        )
-        .get_matches();
+    use clap::{Parser, Subcommand};
 
-    let repo = matches.value_of("REPOSITORY").unwrap();
-    let nm_path = matches.value_of("NODEMAP_FILE").unwrap();
-
-    let index = mmap_index(&Path::new(repo));
+    #[derive(Parser)]
+    #[command()]
+    /// Nodemap pure Rust example
+    struct App {
+        // Path to the repository, always necessary for its index
+        #[arg(short, long)]
+        repository: PathBuf,
+        // Path to the nodemap file, independent of REPOSITORY
+        #[arg(short, long)]
+        nodemap_file: PathBuf,
+        #[command(subcommand)]
+        command: Command,
+    }
 
-    if let Some(_) = matches.subcommand_matches("create") {
-        println!("Creating nodemap file {} for repository {}", nm_path, repo);
-        create(&index, &Path::new(nm_path)).unwrap();
-        return;
+    #[derive(Subcommand)]
+    enum Command {
+        /// Create `NODEMAP_FILE` by scanning repository index
+        Create,
+        /// Query `NODEMAP_FILE` for `prefix`
+        Query { prefix: String },
+        /// Perform #`QUERIES` random successful queries on `NODEMAP_FILE`
+        Bench { queries: usize },
     }
 
-    let nm = mmap_nodemap(&Path::new(nm_path));
-    if let Some(matches) = matches.subcommand_matches("query") {
-        let prefix = matches.value_of("PREFIX").unwrap();
-        println!(
-            "Querying {} in nodemap file {} of repository {}",
-            prefix, nm_path, repo
-        );
-        query(&index, &nm, prefix);
-    }
-    if let Some(matches) = matches.subcommand_matches("bench") {
-        let queries =
-            usize::from_str(matches.value_of("QUERIES").unwrap()).unwrap();
-        println!(
-            "Doing {} random queries in nodemap file {} of repository {}",
-            queries, nm_path, repo
-        );
-        bench(&index, &nm, queries);
+    let app = App::parse();
+
+    let repo = &app.repository;
+    let nm_path = &app.nodemap_file;
+
+    let index = mmap_index(repo);
+    let nm = mmap_nodemap(nm_path);
+
+    match &app.command {
+        Command::Create => {
+            println!(
+                "Creating nodemap file {} for repository {}",
+                nm_path.display(),
+                repo.display()
+            );
+            create(&index, Path::new(nm_path)).unwrap();
+        }
+        Command::Bench { queries } => {
+            println!(
+                "Doing {} random queries in nodemap file {} of repository {}",
+                queries,
+                nm_path.display(),
+                repo.display()
+            );
+            bench(&index, &nm, *queries);
+        }
+        Command::Query { prefix } => {
+            println!(
+                "Querying {} in nodemap file {} of repository {}",
+                prefix,
+                nm_path.display(),
+                repo.display()
+            );
+            query(&index, &nm, prefix);
+        }
     }
 }
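
The hunk above ports the nodemap example from clap 2's builder API to clap 4's
derive API. Boiled down to a standalone sketch (all names hypothetical)::

    use std::path::PathBuf;

    use clap::{Parser, Subcommand};

    /// clap 4 derives the parser from plain structs and enums, replacing
    /// the App/Arg/SubCommand builder calls that clap 2 required.
    #[derive(Parser)]
    struct Cli {
        /// Path to some input file
        #[arg(short, long)]
        input: PathBuf,
        #[command(subcommand)]
        command: Cmd,
    }

    #[derive(Subcommand)]
    enum Cmd {
        /// Print the path once
        Show,
        /// Print it `n` times
        Repeat { n: usize },
    }

    fn main() {
        let cli = Cli::parse();
        match cli.command {
            Cmd::Show => println!("{}", cli.input.display()),
            Cmd::Repeat { n } => {
                (0..n).for_each(|_| println!("{}", cli.input.display()))
            }
        }
    }
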
--- a/rust/hg-core/src/ancestors.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/ancestors.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -175,7 +175,7 @@
     ///
     /// This is useful in unit tests, but setdiscovery.py also reads
     /// the bases attribute of an ancestor.missingancestors instance.
-    pub fn get_bases<'a>(&'a self) -> &'a HashSet<Revision> {
+    pub fn get_bases(&self) -> &HashSet<Revision> {
         &self.bases
     }
 
@@ -288,7 +288,7 @@
             .collect();
         let revs_visit = &mut revs;
         let mut both_visit: HashSet<Revision> =
-            revs_visit.intersection(&bases_visit).cloned().collect();
+            revs_visit.intersection(bases_visit).cloned().collect();
         if revs_visit.is_empty() {
             return Ok(Vec::new());
         }
@@ -357,7 +357,6 @@
 
     use super::*;
     use crate::testing::{SampleGraph, VecGraph};
-    use std::iter::FromIterator;
 
     fn list_ancestors<G: Graph>(
         graph: G,
@@ -504,18 +503,18 @@
             MissingAncestors::new(SampleGraph, [5, 3, 1, 3].iter().cloned());
         let mut as_vec: Vec<Revision> =
             missing_ancestors.get_bases().iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         assert_eq!(as_vec, [1, 3, 5]);
         assert_eq!(missing_ancestors.max_base, 5);
 
         missing_ancestors.add_bases([3, 7, 8].iter().cloned());
         as_vec = missing_ancestors.get_bases().iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         assert_eq!(as_vec, [1, 3, 5, 7, 8]);
         assert_eq!(missing_ancestors.max_base, 8);
 
         as_vec = missing_ancestors.bases_heads()?.iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         assert_eq!(as_vec, [3, 5, 7, 8]);
         Ok(())
     }
@@ -532,7 +531,7 @@
             .remove_ancestors_from(&mut revset)
             .unwrap();
         let mut as_vec: Vec<Revision> = revset.into_iter().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         assert_eq!(as_vec.as_slice(), expected);
     }
 
@@ -573,6 +572,7 @@
     /// the one in test-ancestor.py. An early version of Rust MissingAncestors
     /// failed this, yet none of the integration tests of the whole suite
     /// caught it.
+    #[allow(clippy::unnecessary_cast)]
     #[test]
     fn test_remove_ancestors_from_case1() {
         let graph: VecGraph = vec![
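
The ancestors.rs hunk is mostly mechanical clippy cleanup. Two of the lints
involved, compressed into a toy example (the lint names are our annotation,
not something the diff states)::

    fn main() {
        // clippy::stable_sort_primitive: primitives have no equal-but-distinct
        // elements whose order a stable sort could preserve, so the cheaper
        // in-place sort_unstable is preferred, as in the tests above.
        let mut revs = vec![5, 3, 1, 3];
        revs.sort_unstable();
        assert_eq!(revs, [1, 3, 3, 5]);

        // clippy::clone_on_copy: for Copy types (like Node in the example
        // change above), a plain dereference replaces an explicit .clone().
        let node = 7u32;
        let borrowed = &node;
        let copied = *borrowed; // rather than borrowed.clone()
        assert_eq!(copied, 7);
    }
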
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/checkexec.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,119 @@
+use std::fs;
+use std::io;
+use std::os::unix::fs::{MetadataExt, PermissionsExt};
+use std::path::Path;
+
+const EXECFLAGS: u32 = 0o111;
+
+fn is_executable(path: impl AsRef<Path>) -> Result<bool, io::Error> {
+    let metadata = fs::metadata(path)?;
+    let mode = metadata.mode();
+    Ok(mode & EXECFLAGS != 0)
+}
+
+fn make_executable(path: impl AsRef<Path>) -> Result<(), io::Error> {
+    let mode = fs::metadata(path.as_ref())?.mode();
+    fs::set_permissions(
+        path,
+        fs::Permissions::from_mode((mode & 0o777) | EXECFLAGS),
+    )?;
+    Ok(())
+}
+
+fn copy_mode(
+    src: impl AsRef<Path>,
+    dst: impl AsRef<Path>,
+) -> Result<(), io::Error> {
+    let mode = match fs::symlink_metadata(src) {
+        Ok(metadata) => metadata.mode(),
+        Err(e) if e.kind() == io::ErrorKind::NotFound =>
+        // copymode in Python has more complicated FileNotFound error
+        // handling, which we don't need because all it does is apply the
+        // umask, and the OS already does that when we mkdir.
+        {
+            return Ok(())
+        }
+        Err(e) => return Err(e),
+    };
+    fs::set_permissions(dst, fs::Permissions::from_mode(mode))?;
+    Ok(())
+}
+
+fn check_exec_impl(path: impl AsRef<Path>) -> Result<bool, io::Error> {
+    let basedir = path.as_ref().join(".hg");
+    let cachedir = basedir.join("wcache");
+    let storedir = basedir.join("store");
+
+    if !cachedir.exists() {
+        // We want to create the 'cache' directory, not the '.hg' one.
+        // Automatically creating the '.hg' directory could silently spawn
+        // invalid Mercurial repositories. That seems like a bad idea.
+        fs::create_dir(&cachedir)
+            .and_then(|()| {
+                if storedir.exists() {
+                    copy_mode(&storedir, &cachedir)
+                } else {
+                    copy_mode(&basedir, &cachedir)
+                }
+            })
+            .ok();
+    }
+
+    let leave_file: bool;
+    let checkdir: &Path;
+    let checkisexec = cachedir.join("checkisexec");
+    let checknoexec = cachedir.join("checknoexec");
+    if cachedir.is_dir() {
+        // Check if both files already exist in cache and have correct
+        // permissions. If so, we assume that permissions work.
+        // If not, we delete the files and try again.
+        match is_executable(&checkisexec) {
+            Err(e) if e.kind() == io::ErrorKind::NotFound => (),
+            Err(e) => return Err(e),
+            Ok(is_exec) => {
+                if is_exec {
+                    let noexec_is_exec = match is_executable(&checknoexec) {
+                        Err(e) if e.kind() == io::ErrorKind::NotFound => {
+                            fs::write(&checknoexec, "")?;
+                            is_executable(&checknoexec)?
+                        }
+                        Err(e) => return Err(e),
+                        Ok(exec) => exec,
+                    };
+                    if !noexec_is_exec {
+                        // check-exec is exec and check-no-exec is not exec
+                        return Ok(true);
+                    }
+                    fs::remove_file(&checknoexec)?;
+                }
+                fs::remove_file(&checkisexec)?;
+            }
+        }
+        checkdir = &cachedir;
+        leave_file = true;
+    } else {
+        // no cache directory (probably because .hg doesn't exist):
+        // check directly in `path` and don't leave the temp file behind
+        checkdir = path.as_ref();
+        leave_file = false;
+    };
+
+    let tmp_file = tempfile::NamedTempFile::new_in(checkdir)?;
+    if !is_executable(tmp_file.path())? {
+        make_executable(tmp_file.path())?;
+        if is_executable(tmp_file.path())? {
+            if leave_file {
+                tmp_file.persist(checkisexec).ok();
+            }
+            return Ok(true);
+        }
+    }
+
+    Ok(false)
+}
+
+/// This function is a Rust rewrite of the [checkexec] function from [posix.py].
+/// Returns true if the filesystem supports execute permissions.
+pub fn check_exec(path: impl AsRef<Path>) -> bool {
+    check_exec_impl(path).unwrap_or(false)
+}
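
A sketch of how a caller might use the new helper; the ``hg::checkexec``
module path is an assumption about how this file gets wired into the crate::

    // Hypothetical caller. `check_exec` takes the repository root (the
    // directory containing `.hg`) and maps any I/O error to `false`.
    fn supports_exec_bit(repo_root: &std::path::Path) -> bool {
        hg::checkexec::check_exec(repo_root)
    }

    fn main() {
        let here = std::path::Path::new(".");
        println!("exec bit usable: {}", supports_exec_bit(here));
    }
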
--- a/rust/hg-core/src/config.rs	Thu Mar 02 15:21:36 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-// config.rs
-//
-// Copyright 2020
-//      Valentin Gatien-Baron,
-//      Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-//! Mercurial config parsing and interfaces.
-
-mod config;
-mod layer;
-mod plain_info;
-mod values;
-pub use config::{Config, ConfigSource, ConfigValueParseError};
-pub use layer::{ConfigError, ConfigOrigin, ConfigParseError};
-pub use plain_info::PlainInfo;
--- a/rust/hg-core/src/config/config.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/config/config.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,654 +1,1 @@
-// config.rs
-//
-// Copyright 2020
-//      Valentin Gatien-Baron,
-//      Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
 
-use super::layer;
-use super::values;
-use crate::config::layer::{
-    ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
-};
-use crate::config::plain_info::PlainInfo;
-use crate::utils::files::get_bytes_from_os_str;
-use format_bytes::{write_bytes, DisplayBytes};
-use std::collections::HashSet;
-use std::env;
-use std::fmt;
-use std::path::{Path, PathBuf};
-use std::str;
-
-use crate::errors::{HgResultExt, IoResultExt};
-
-/// Holds the config values for the current repository
-/// TODO update this docstring once we support more sources
-#[derive(Clone)]
-pub struct Config {
-    layers: Vec<layer::ConfigLayer>,
-    plain: PlainInfo,
-}
-
-impl DisplayBytes for Config {
-    fn display_bytes(
-        &self,
-        out: &mut dyn std::io::Write,
-    ) -> std::io::Result<()> {
-        for (index, layer) in self.layers.iter().rev().enumerate() {
-            write_bytes!(
-                out,
-                b"==== Layer {} (trusted: {}) ====\n{}",
-                index,
-                if layer.trusted {
-                    &b"yes"[..]
-                } else {
-                    &b"no"[..]
-                },
-                layer
-            )?;
-        }
-        Ok(())
-    }
-}
-
-pub enum ConfigSource {
-    /// Absolute path to a config file
-    AbsPath(PathBuf),
-    /// Already parsed (from the CLI, env, Python resources, etc.)
-    Parsed(layer::ConfigLayer),
-}
-
-#[derive(Debug)]
-pub struct ConfigValueParseError {
-    pub origin: ConfigOrigin,
-    pub line: Option<usize>,
-    pub section: Vec<u8>,
-    pub item: Vec<u8>,
-    pub value: Vec<u8>,
-    pub expected_type: &'static str,
-}
-
-impl fmt::Display for ConfigValueParseError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        // TODO: add origin and line number information, here and in
-        // corresponding python code
-        write!(
-            f,
-            "config error: {}.{} is not a {} ('{}')",
-            String::from_utf8_lossy(&self.section),
-            String::from_utf8_lossy(&self.item),
-            self.expected_type,
-            String::from_utf8_lossy(&self.value)
-        )
-    }
-}
-
-/// Returns true if the config item is disabled by PLAIN or PLAINEXCEPT
-fn should_ignore(plain: &PlainInfo, section: &[u8], item: &[u8]) -> bool {
-    // duplication with [_applyconfig] in [ui.py],
-    if !plain.is_plain() {
-        return false;
-    }
-    if section == b"alias" {
-        return plain.plainalias();
-    }
-    if section == b"revsetalias" {
-        return plain.plainrevsetalias();
-    }
-    if section == b"templatealias" {
-        return plain.plaintemplatealias();
-    }
-    if section == b"ui" {
-        let to_delete: &[&[u8]] = &[
-            b"debug",
-            b"fallbackencoding",
-            b"quiet",
-            b"slash",
-            b"logtemplate",
-            b"message-output",
-            b"statuscopies",
-            b"style",
-            b"traceback",
-            b"verbose",
-        ];
-        return to_delete.contains(&item);
-    }
-    let sections_to_delete: &[&[u8]] =
-        &[b"defaults", b"commands", b"command-templates"];
-    return sections_to_delete.contains(&section);
-}
-
-impl Config {
-    /// The configuration to use when printing configuration-loading errors
-    pub fn empty() -> Self {
-        Self {
-            layers: Vec::new(),
-            plain: PlainInfo::empty(),
-        }
-    }
-
-    /// Load system and user configuration from various files.
-    ///
-    /// This is also affected by some environment variables.
-    pub fn load_non_repo() -> Result<Self, ConfigError> {
-        let mut config = Self::empty();
-        let opt_rc_path = env::var_os("HGRCPATH");
-        // HGRCPATH replaces system config
-        if opt_rc_path.is_none() {
-            config.add_system_config()?
-        }
-
-        config.add_for_environment_variable("EDITOR", b"ui", b"editor");
-        config.add_for_environment_variable("VISUAL", b"ui", b"editor");
-        config.add_for_environment_variable("PAGER", b"pager", b"pager");
-
-        // These are set by `run-tests.py --rhg` to enable fallback for the
-        // entire test suite. Alternatives would be setting configuration
-        // through `$HGRCPATH` but some tests override that, or changing the
-        // `hg` shell alias to include `--config` but that disrupts tests that
-        // print command lines and check expected output.
-        config.add_for_environment_variable(
-            "RHG_ON_UNSUPPORTED",
-            b"rhg",
-            b"on-unsupported",
-        );
-        config.add_for_environment_variable(
-            "RHG_FALLBACK_EXECUTABLE",
-            b"rhg",
-            b"fallback-executable",
-        );
-
-        // HGRCPATH replaces user config
-        if opt_rc_path.is_none() {
-            config.add_user_config()?
-        }
-        if let Some(rc_path) = &opt_rc_path {
-            for path in env::split_paths(rc_path) {
-                if !path.as_os_str().is_empty() {
-                    if path.is_dir() {
-                        config.add_trusted_dir(&path)?
-                    } else {
-                        config.add_trusted_file(&path)?
-                    }
-                }
-            }
-        }
-        Ok(config)
-    }
-
-    pub fn load_cli_args(
-        &mut self,
-        cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
-        color_arg: Option<Vec<u8>>,
-    ) -> Result<(), ConfigError> {
-        if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
-            self.layers.push(layer)
-        }
-        if let Some(arg) = color_arg {
-            let mut layer = ConfigLayer::new(ConfigOrigin::CommandLineColor);
-            layer.add(b"ui"[..].into(), b"color"[..].into(), arg, None);
-            self.layers.push(layer)
-        }
-        Ok(())
-    }
-
-    fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
-        if let Some(entries) = std::fs::read_dir(path)
-            .when_reading_file(path)
-            .io_not_found_as_none()?
-        {
-            let mut file_paths = entries
-                .map(|result| {
-                    result.when_reading_file(path).map(|entry| entry.path())
-                })
-                .collect::<Result<Vec<_>, _>>()?;
-            file_paths.sort();
-            for file_path in &file_paths {
-                if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
-                    self.add_trusted_file(&file_path)?
-                }
-            }
-        }
-        Ok(())
-    }
-
-    fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
-        if let Some(data) = std::fs::read(path)
-            .when_reading_file(path)
-            .io_not_found_as_none()?
-        {
-            self.layers.extend(ConfigLayer::parse(path, &data)?)
-        }
-        Ok(())
-    }
-
-    fn add_for_environment_variable(
-        &mut self,
-        var: &str,
-        section: &[u8],
-        key: &[u8],
-    ) {
-        if let Some(value) = env::var_os(var) {
-            let origin = layer::ConfigOrigin::Environment(var.into());
-            let mut layer = ConfigLayer::new(origin);
-            layer.add(
-                section.to_owned(),
-                key.to_owned(),
-                get_bytes_from_os_str(value),
-                None,
-            );
-            self.layers.push(layer)
-        }
-    }
-
-    #[cfg(unix)] // TODO: other platforms
-    fn add_system_config(&mut self) -> Result<(), ConfigError> {
-        let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
-            let etc = prefix.join("etc").join("mercurial");
-            self.add_trusted_file(&etc.join("hgrc"))?;
-            self.add_trusted_dir(&etc.join("hgrc.d"))
-        };
-        let root = Path::new("/");
-        // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
-        // instead? TODO: can this be a relative path?
-        let hg = crate::utils::current_exe()?;
-        // TODO: this order (per-installation then per-system) matches
-        // `systemrcpath()` in `mercurial/scmposix.py`, but
-        // `mercurial/helptext/config.txt` suggests it should be reversed
-        if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
-            if installation_prefix != root {
-                add_for_prefix(&installation_prefix)?
-            }
-        }
-        add_for_prefix(root)?;
-        Ok(())
-    }
-
-    #[cfg(unix)] // TODO: other platforms
-    fn add_user_config(&mut self) -> Result<(), ConfigError> {
-        let opt_home = home::home_dir();
-        if let Some(home) = &opt_home {
-            self.add_trusted_file(&home.join(".hgrc"))?
-        }
-        let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
-        if !darwin {
-            if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
-                .map(PathBuf::from)
-                .or_else(|| opt_home.map(|home| home.join(".config")))
-            {
-                self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
-            }
-        }
-        Ok(())
-    }
-
-    /// Loads in order, which means that the precedence is the same
-    /// as the order of `sources`.
-    pub fn load_from_explicit_sources(
-        sources: Vec<ConfigSource>,
-    ) -> Result<Self, ConfigError> {
-        let mut layers = vec![];
-
-        for source in sources.into_iter() {
-            match source {
-                ConfigSource::Parsed(c) => layers.push(c),
-                ConfigSource::AbsPath(c) => {
-                    // TODO check if it should be trusted
-                    // mercurial/ui.py:427
-                    let data = match std::fs::read(&c) {
-                        Err(_) => continue, // same as the python code
-                        Ok(data) => data,
-                    };
-                    layers.extend(ConfigLayer::parse(&c, &data)?)
-                }
-            }
-        }
-
-        Ok(Config {
-            layers,
-            plain: PlainInfo::empty(),
-        })
-    }
-
-    /// Loads the per-repository config into a new `Config` which is combined
-    /// with `self`.
-    pub(crate) fn combine_with_repo(
-        &self,
-        repo_config_files: &[PathBuf],
-    ) -> Result<Self, ConfigError> {
-        let (cli_layers, other_layers) = self
-            .layers
-            .iter()
-            .cloned()
-            .partition(ConfigLayer::is_from_command_line);
-
-        let mut repo_config = Self {
-            layers: other_layers,
-            plain: PlainInfo::empty(),
-        };
-        for path in repo_config_files {
-            // TODO: check if this file should be trusted:
-            // `mercurial/ui.py:427`
-            repo_config.add_trusted_file(path)?;
-        }
-        repo_config.layers.extend(cli_layers);
-        Ok(repo_config)
-    }
-
-    pub fn apply_plain(&mut self, plain: PlainInfo) {
-        self.plain = plain;
-    }
-
-    fn get_parse<'config, T: 'config>(
-        &'config self,
-        section: &[u8],
-        item: &[u8],
-        expected_type: &'static str,
-        parse: impl Fn(&'config [u8]) -> Option<T>,
-    ) -> Result<Option<T>, ConfigValueParseError> {
-        match self.get_inner(&section, &item) {
-            Some((layer, v)) => match parse(&v.bytes) {
-                Some(b) => Ok(Some(b)),
-                None => Err(ConfigValueParseError {
-                    origin: layer.origin.to_owned(),
-                    line: v.line,
-                    value: v.bytes.to_owned(),
-                    section: section.to_owned(),
-                    item: item.to_owned(),
-                    expected_type,
-                }),
-            },
-            None => Ok(None),
-        }
-    }
-
-    /// Returns an `Err` if the first value found is not a valid UTF-8 string.
-    /// Otherwise, returns an `Ok(value)` if found, or `None`.
-    pub fn get_str(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Result<Option<&str>, ConfigValueParseError> {
-        self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
-            str::from_utf8(value).ok()
-        })
-    }
-
-    /// Returns an `Err` if the first value found is not a valid unsigned
-    /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
-    pub fn get_u32(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Result<Option<u32>, ConfigValueParseError> {
-        self.get_parse(section, item, "valid integer", |value| {
-            str::from_utf8(value).ok()?.parse().ok()
-        })
-    }
-
-    /// Returns an `Err` if the first value found is not a valid file size
-    /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
-    /// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
-    pub fn get_byte_size(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Result<Option<u64>, ConfigValueParseError> {
-        self.get_parse(section, item, "byte quantity", values::parse_byte_size)
-    }
-
-    /// Returns an `Err` if the first value found is not a valid boolean.
-    /// Otherwise, returns an `Ok(option)`, where `option` is the boolean if
-    /// found, or `None`.
-    pub fn get_option(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Result<Option<bool>, ConfigValueParseError> {
-        self.get_parse(section, item, "boolean", values::parse_bool)
-    }
-
-    /// Returns the corresponding boolean in the config. Returns `Ok(false)`
-    /// if the value is not found, an `Err` if it's not a valid boolean.
-    pub fn get_bool(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Result<bool, ConfigValueParseError> {
-        Ok(self.get_option(section, item)?.unwrap_or(false))
-    }
-
-    /// Returns `true` if the extension is enabled, `false` otherwise
-    pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
-        let value = self.get(b"extensions", extension);
-        match value {
-            Some(c) => !c.starts_with(b"!"),
-            None => false,
-        }
-    }
-
-    /// If there is an `item` value in `section`, parse and return a list of
-    /// byte strings.
-    pub fn get_list(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Option<Vec<Vec<u8>>> {
-        self.get(section, item).map(values::parse_list)
-    }
-
-    /// Returns the raw value bytes of the first one found, or `None`.
-    pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
-        self.get_inner(section, item)
-            .map(|(_, value)| value.bytes.as_ref())
-    }
-
-    /// Returns the raw value bytes of the first one found, or `None`.
-    pub fn get_with_origin(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Option<(&[u8], &ConfigOrigin)> {
-        self.get_inner(section, item)
-            .map(|(layer, value)| (value.bytes.as_ref(), &layer.origin))
-    }
-
-    /// Returns the layer and the value of the first one found, or `None`.
-    fn get_inner(
-        &self,
-        section: &[u8],
-        item: &[u8],
-    ) -> Option<(&ConfigLayer, &ConfigValue)> {
-        // Filter out the config items that are hidden by [PLAIN].
-        // This differs from python hg where we delete them from the config.
-        let should_ignore = should_ignore(&self.plain, &section, &item);
-        for layer in self.layers.iter().rev() {
-            if !layer.trusted {
-                continue;
-            }
-            //The [PLAIN] config should not affect the defaults.
-            //
-            // However, PLAIN should also affect the "tweaked" defaults (unless
-            // "tweakdefault" is part of "HGPLAINEXCEPT").
-            //
-            // In practice the tweak-default layer is only added when it is
-            // relevant, so we can safely always take it into
-            // account here.
-            if should_ignore && !(layer.origin == ConfigOrigin::Tweakdefaults)
-            {
-                continue;
-            }
-            if let Some(v) = layer.get(&section, &item) {
-                return Some((&layer, v));
-            }
-        }
-        None
-    }
-
-    /// Return all keys defined for the given section
-    pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
-        self.layers
-            .iter()
-            .flat_map(|layer| layer.iter_keys(section))
-            .collect()
-    }
-
-    /// Returns whether any key is defined in the given section
-    pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
-        self.layers
-            .iter()
-            .any(|layer| layer.has_non_empty_section(section))
-    }
-
-    /// Yields (key, value) pairs for everything in the given section
-    pub fn iter_section<'a>(
-        &'a self,
-        section: &'a [u8],
-    ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a {
-        // TODO: Use `Iterator`’s `.peekable()` when its `peek_mut` is
-        // available:
-        // https://doc.rust-lang.org/nightly/std/iter/struct.Peekable.html#method.peek_mut
-        struct Peekable<I: Iterator> {
-            iter: I,
-            /// Remember a peeked value, even if it was None.
-            peeked: Option<Option<I::Item>>,
-        }
-
-        impl<I: Iterator> Peekable<I> {
-            fn new(iter: I) -> Self {
-                Self { iter, peeked: None }
-            }
-
-            fn next(&mut self) {
-                self.peeked = None
-            }
-
-            fn peek_mut(&mut self) -> Option<&mut I::Item> {
-                let iter = &mut self.iter;
-                self.peeked.get_or_insert_with(|| iter.next()).as_mut()
-            }
-        }
-
-        // Deduplicate keys redefined in multiple layers
-        let mut keys_already_seen = HashSet::new();
-        let mut key_is_new =
-            move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool {
-                keys_already_seen.insert(key)
-            };
-        // This is similar to `flat_map` + `filter_map`, except with a single
-        // closure that owns `key_is_new` (and therefore the
-        // `keys_already_seen` set):
-        let mut layer_iters = Peekable::new(
-            self.layers
-                .iter()
-                .rev()
-                .map(move |layer| layer.iter_section(section)),
-        );
-        std::iter::from_fn(move || loop {
-            if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) {
-                return Some(pair);
-            } else {
-                layer_iters.next();
-            }
-        })
-    }
-
-    /// Get raw values bytes from all layers (even untrusted ones) in order
-    /// of precedence.
-    #[cfg(test)]
-    fn get_all(&self, section: &[u8], item: &[u8]) -> Vec<&[u8]> {
-        let mut res = vec![];
-        for layer in self.layers.iter().rev() {
-            if let Some(v) = layer.get(&section, &item) {
-                res.push(v.bytes.as_ref());
-            }
-        }
-        res
-    }
-
-    // a config layer that's introduced by ui.tweakdefaults
-    fn tweakdefaults_layer() -> ConfigLayer {
-        let mut layer = ConfigLayer::new(ConfigOrigin::Tweakdefaults);
-
-        let mut add = |section: &[u8], item: &[u8], value: &[u8]| {
-            layer.add(
-                section[..].into(),
-                item[..].into(),
-                value[..].into(),
-                None,
-            );
-        };
-        // duplication of [tweakrc] from [ui.py]
-        add(b"ui", b"rollback", b"False");
-        add(b"ui", b"statuscopies", b"yes");
-        add(b"ui", b"interface", b"curses");
-        add(b"ui", b"relative-paths", b"yes");
-        add(b"commands", b"grep.all-files", b"True");
-        add(b"commands", b"update.check", b"noconflict");
-        add(b"commands", b"status.verbose", b"True");
-        add(b"commands", b"resolve.explicit-re-merge", b"True");
-        add(b"git", b"git", b"1");
-        add(b"git", b"showfunc", b"1");
-        add(b"git", b"word-diff", b"1");
-        return layer;
-    }
-
-    // introduce the tweaked defaults as implied by ui.tweakdefaults
-    pub fn tweakdefaults<'a>(&mut self) -> () {
-        self.layers.insert(0, Config::tweakdefaults_layer());
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use pretty_assertions::assert_eq;
-    use std::fs::File;
-    use std::io::Write;
-
-    #[test]
-    fn test_include_layer_ordering() {
-        let tmpdir = tempfile::tempdir().unwrap();
-        let tmpdir_path = tmpdir.path();
-        let mut included_file =
-            File::create(&tmpdir_path.join("included.rc")).unwrap();
-
-        included_file.write_all(b"[section]\nitem=value1").unwrap();
-        let base_config_path = tmpdir_path.join("base.rc");
-        let mut config_file = File::create(&base_config_path).unwrap();
-        let data =
-            b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
-              [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
-        config_file.write_all(data).unwrap();
-
-        let sources = vec![ConfigSource::AbsPath(base_config_path)];
-        let config = Config::load_from_explicit_sources(sources)
-            .expect("expected valid config");
-
-        let (_, value) = config.get_inner(b"section", b"item").unwrap();
-        assert_eq!(
-            value,
-            &ConfigValue {
-                bytes: b"value2".to_vec(),
-                line: Some(4)
-            }
-        );
-
-        let value = config.get(b"section", b"item").unwrap();
-        assert_eq!(value, b"value2",);
-        assert_eq!(
-            config.get_all(b"section", b"item"),
-            [b"value2", b"value1", b"value0"]
-        );
-
-        assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
-        assert_eq!(
-            config.get_byte_size(b"section2", b"size").unwrap(),
-            Some(1024 + 512)
-        );
-        assert!(config.get_u32(b"section2", b"not-count").is_err());
-        assert!(config.get_byte_size(b"section2", b"not-size").is_err());
-    }
-}
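
The body of config.rs is not deleted outright: it reappears as
rust/hg-core/src/config/mod.rs below, with ``ConfigValueParseError`` becoming
a boxed alias. For orientation, a sketch of how the typed getters shown above
compose; the section and item names here are made up::

    use hg::config::Config;

    fn show(config: &Config) {
        // Raw bytes of the first match, walking layers in precedence order.
        if let Some(editor) = config.get(b"ui", b"editor") {
            println!("editor: {}", String::from_utf8_lossy(editor));
        }
        // Typed getters return Err on unparsable values, Ok(None) when unset.
        let verbose = config.get_bool(b"ui", b"verbose").unwrap_or(false);
        // e.g. "1.5 KB" parses to Ok(Some(1536)), per the unit test above.
        let size = config.get_byte_size(b"storage", b"max-size");
        println!("verbose={} size={:?}", verbose, size);
    }
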
--- a/rust/hg-core/src/config/layer.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/config/layer.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -94,11 +94,7 @@
 
     /// Returns whether this layer comes from `--config` CLI arguments
     pub(crate) fn is_from_command_line(&self) -> bool {
-        if let ConfigOrigin::CommandLine = self.origin {
-            true
-        } else {
-            false
-        }
+        matches!(self.origin, ConfigOrigin::CommandLine)
     }
 
     /// Add an entry to the config, overwriting the old one if already present.
@@ -111,13 +107,13 @@
     ) {
         self.sections
             .entry(section)
-            .or_insert_with(|| HashMap::new())
+            .or_insert_with(HashMap::new)
             .insert(item, ConfigValue { bytes: value, line });
     }
 
     /// Returns the config value in `<section>.<item>` if it exists
     pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&ConfigValue> {
-        Some(self.sections.get(section)?.get(item)?)
+        self.sections.get(section)?.get(item)
     }
 
     /// Returns the keys defined in the given section
@@ -171,7 +167,7 @@
 
         while let Some((index, bytes)) = lines_iter.next() {
             let line = Some(index + 1);
-            if let Some(m) = INCLUDE_RE.captures(&bytes) {
+            if let Some(m) = INCLUDE_RE.captures(bytes) {
                 let filename_bytes = &m[1];
                 let filename_bytes = crate::utils::expand_vars(filename_bytes);
                 // `Path::parent` only fails for the root directory,
@@ -205,18 +201,18 @@
                         }
                     }
                 }
-            } else if let Some(_) = EMPTY_RE.captures(&bytes) {
-            } else if let Some(m) = SECTION_RE.captures(&bytes) {
+            } else if EMPTY_RE.captures(bytes).is_some() {
+            } else if let Some(m) = SECTION_RE.captures(bytes) {
                 section = m[1].to_vec();
-            } else if let Some(m) = ITEM_RE.captures(&bytes) {
+            } else if let Some(m) = ITEM_RE.captures(bytes) {
                 let item = m[1].to_vec();
                 let mut value = m[2].to_vec();
                 loop {
                     match lines_iter.peek() {
                         None => break,
                         Some((_, v)) => {
-                            if let Some(_) = COMMENT_RE.captures(&v) {
-                            } else if let Some(_) = CONT_RE.captures(&v) {
+                            if COMMENT_RE.captures(v).is_some() {
+                            } else if CONT_RE.captures(v).is_some() {
                                 value.extend(b"\n");
                                 value.extend(&m[1]);
                             } else {
@@ -227,7 +223,7 @@
                     lines_iter.next();
                 }
                 current_layer.add(section.clone(), item, value, line);
-            } else if let Some(m) = UNSET_RE.captures(&bytes) {
+            } else if let Some(m) = UNSET_RE.captures(bytes) {
                 if let Some(map) = current_layer.sections.get_mut(&section) {
                     map.remove(&m[1]);
                 }
@@ -261,7 +257,7 @@
         sections.sort_by(|e0, e1| e0.0.cmp(e1.0));
 
         for (section, items) in sections.into_iter() {
-            let mut items: Vec<_> = items.into_iter().collect();
+            let mut items: Vec<_> = items.iter().collect();
             items.sort_by(|e0, e1| e0.0.cmp(e1.0));
 
             for (item, config_entry) in items {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/config/mod.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,636 @@
+// config/mod.rs
+//
+// Copyright 2020
+//      Valentin Gatien-Baron,
+//      Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Mercurial config parsing and interfaces.
+
+mod layer;
+mod plain_info;
+mod values;
+pub use layer::{ConfigError, ConfigOrigin, ConfigParseError};
+pub use plain_info::PlainInfo;
+
+use self::layer::ConfigLayer;
+use self::layer::ConfigValue;
+use crate::errors::{HgResultExt, IoResultExt};
+use crate::utils::files::get_bytes_from_os_str;
+use format_bytes::{write_bytes, DisplayBytes};
+use std::collections::HashSet;
+use std::env;
+use std::fmt;
+use std::path::{Path, PathBuf};
+use std::str;
+
+/// Holds the config values for the current repository
+/// TODO update this docstring once we support more sources
+#[derive(Clone)]
+pub struct Config {
+    layers: Vec<layer::ConfigLayer>,
+    plain: PlainInfo,
+}
+
+impl DisplayBytes for Config {
+    fn display_bytes(
+        &self,
+        out: &mut dyn std::io::Write,
+    ) -> std::io::Result<()> {
+        for (index, layer) in self.layers.iter().rev().enumerate() {
+            write_bytes!(
+                out,
+                b"==== Layer {} (trusted: {}) ====\n{}",
+                index,
+                if layer.trusted {
+                    &b"yes"[..]
+                } else {
+                    &b"no"[..]
+                },
+                layer
+            )?;
+        }
+        Ok(())
+    }
+}
+
+pub enum ConfigSource {
+    /// Absolute path to a config file
+    AbsPath(PathBuf),
+    /// Already parsed (from the CLI, env, Python resources, etc.)
+    Parsed(layer::ConfigLayer),
+}
+
+#[derive(Debug)]
+pub struct ConfigValueParseErrorDetails {
+    pub origin: ConfigOrigin,
+    pub line: Option<usize>,
+    pub section: Vec<u8>,
+    pub item: Vec<u8>,
+    pub value: Vec<u8>,
+    pub expected_type: &'static str,
+}
+
+// boxed to avoid very large Result types
+pub type ConfigValueParseError = Box<ConfigValueParseErrorDetails>;
+
+impl fmt::Display for ConfigValueParseError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        // TODO: add origin and line number information, here and in
+        // corresponding python code
+        write!(
+            f,
+            "config error: {}.{} is not a {} ('{}')",
+            String::from_utf8_lossy(&self.section),
+            String::from_utf8_lossy(&self.item),
+            self.expected_type,
+            String::from_utf8_lossy(&self.value)
+        )
+    }
+}
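
For illustration, a standalone check (stand-in values, not the crate's real types) of the message shape produced by the `write!` above:

fn main() {
    let (section, item, expected_type, value) =
        ("ui", "interactive", "boolean", "maybe");
    let msg = format!(
        "config error: {}.{} is not a {} ('{}')",
        section, item, expected_type, value
    );
    assert_eq!(msg, "config error: ui.interactive is not a boolean ('maybe')");
}
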
+
+/// Returns true if the config item is disabled by PLAIN or PLAINEXCEPT
+fn should_ignore(plain: &PlainInfo, section: &[u8], item: &[u8]) -> bool {
+    // duplication with [_applyconfig] in [ui.py]
+    if !plain.is_plain() {
+        return false;
+    }
+    if section == b"alias" {
+        return plain.plainalias();
+    }
+    if section == b"revsetalias" {
+        return plain.plainrevsetalias();
+    }
+    if section == b"templatealias" {
+        return plain.plaintemplatealias();
+    }
+    if section == b"ui" {
+        let to_delete: &[&[u8]] = &[
+            b"debug",
+            b"fallbackencoding",
+            b"quiet",
+            b"slash",
+            b"logtemplate",
+            b"message-output",
+            b"statuscopies",
+            b"style",
+            b"traceback",
+            b"verbose",
+        ];
+        return to_delete.contains(&item);
+    }
+    let sections_to_delete: &[&[u8]] =
+        &[b"defaults", b"commands", b"command-templates"];
+    sections_to_delete.contains(&section)
+}
+
+impl Config {
+    /// The configuration to use when printing configuration-loading errors
+    pub fn empty() -> Self {
+        Self {
+            layers: Vec::new(),
+            plain: PlainInfo::empty(),
+        }
+    }
+
+    /// Load system and user configuration from various files.
+    ///
+    /// This is also affected by some environment variables.
+    pub fn load_non_repo() -> Result<Self, ConfigError> {
+        let mut config = Self::empty();
+        let opt_rc_path = env::var_os("HGRCPATH");
+        // HGRCPATH replaces system config
+        if opt_rc_path.is_none() {
+            config.add_system_config()?
+        }
+
+        config.add_for_environment_variable("EDITOR", b"ui", b"editor");
+        config.add_for_environment_variable("VISUAL", b"ui", b"editor");
+        config.add_for_environment_variable("PAGER", b"pager", b"pager");
+
+        // These are set by `run-tests.py --rhg` to enable fallback for the
+        // entire test suite. Alternatives would be setting configuration
+        // through `$HGRCPATH` but some tests override that, or changing the
+        // `hg` shell alias to include `--config` but that disrupts tests that
+        // print command lines and check expected output.
+        config.add_for_environment_variable(
+            "RHG_ON_UNSUPPORTED",
+            b"rhg",
+            b"on-unsupported",
+        );
+        config.add_for_environment_variable(
+            "RHG_FALLBACK_EXECUTABLE",
+            b"rhg",
+            b"fallback-executable",
+        );
+
+        // HGRCPATH replaces user config
+        if opt_rc_path.is_none() {
+            config.add_user_config()?
+        }
+        if let Some(rc_path) = &opt_rc_path {
+            for path in env::split_paths(rc_path) {
+                if !path.as_os_str().is_empty() {
+                    if path.is_dir() {
+                        config.add_trusted_dir(&path)?
+                    } else {
+                        config.add_trusted_file(&path)?
+                    }
+                }
+            }
+        }
+        Ok(config)
+    }
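
A hedged sketch (std only, printing instead of parsing) of the `HGRCPATH` splitting used above, where each entry is a file or a directory of `*.rc` files and empty entries are skipped:

use std::env;

fn main() {
    if let Some(rc_path) = env::var_os("HGRCPATH") {
        for path in env::split_paths(&rc_path) {
            if path.as_os_str().is_empty() {
                continue; // mirrors the `!path.as_os_str().is_empty()` guard
            }
            let kind = if path.is_dir() { "config dir" } else { "config file" };
            println!("{}: {}", kind, path.display());
        }
    } else {
        println!("no HGRCPATH: system and user config would be loaded");
    }
}
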
+
+    pub fn load_cli_args(
+        &mut self,
+        cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
+        color_arg: Option<Vec<u8>>,
+    ) -> Result<(), ConfigError> {
+        if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
+            self.layers.push(layer)
+        }
+        if let Some(arg) = color_arg {
+            let mut layer = ConfigLayer::new(ConfigOrigin::CommandLineColor);
+            layer.add(b"ui"[..].into(), b"color"[..].into(), arg, None);
+            self.layers.push(layer)
+        }
+        Ok(())
+    }
+
+    fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
+        if let Some(entries) = std::fs::read_dir(path)
+            .when_reading_file(path)
+            .io_not_found_as_none()?
+        {
+            let mut file_paths = entries
+                .map(|result| {
+                    result.when_reading_file(path).map(|entry| entry.path())
+                })
+                .collect::<Result<Vec<_>, _>>()?;
+            file_paths.sort();
+            for file_path in &file_paths {
+                if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
+                    self.add_trusted_file(file_path)?
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
+        if let Some(data) = std::fs::read(path)
+            .when_reading_file(path)
+            .io_not_found_as_none()?
+        {
+            self.layers.extend(ConfigLayer::parse(path, &data)?)
+        }
+        Ok(())
+    }
+
+    fn add_for_environment_variable(
+        &mut self,
+        var: &str,
+        section: &[u8],
+        key: &[u8],
+    ) {
+        if let Some(value) = env::var_os(var) {
+            let origin = layer::ConfigOrigin::Environment(var.into());
+            let mut layer = ConfigLayer::new(origin);
+            layer.add(
+                section.to_owned(),
+                key.to_owned(),
+                get_bytes_from_os_str(value),
+                None,
+            );
+            self.layers.push(layer)
+        }
+    }
+
+    #[cfg(unix)] // TODO: other platforms
+    fn add_system_config(&mut self) -> Result<(), ConfigError> {
+        let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
+            let etc = prefix.join("etc").join("mercurial");
+            self.add_trusted_file(&etc.join("hgrc"))?;
+            self.add_trusted_dir(&etc.join("hgrc.d"))
+        };
+        let root = Path::new("/");
+        // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
+        // instead? TODO: can this be a relative path?
+        let hg = crate::utils::current_exe()?;
+        // TODO: this order (per-installation then per-system) matches
+        // `systemrcpath()` in `mercurial/scmposix.py`, but
+        // `mercurial/helptext/config.txt` suggests it should be reversed
+        if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
+            if installation_prefix != root {
+                add_for_prefix(installation_prefix)?
+            }
+        }
+        add_for_prefix(root)?;
+        Ok(())
+    }
+
+    #[cfg(unix)] // TODO: other platforms
+    fn add_user_config(&mut self) -> Result<(), ConfigError> {
+        let opt_home = home::home_dir();
+        if let Some(home) = &opt_home {
+            self.add_trusted_file(&home.join(".hgrc"))?
+        }
+        let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
+        if !darwin {
+            if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
+                .map(PathBuf::from)
+                .or_else(|| opt_home.map(|home| home.join(".config")))
+            {
+                self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
+            }
+        }
+        Ok(())
+    }
+
+    /// Loads in order, which means that the precedence is the same
+    /// as the order of `sources`.
+    pub fn load_from_explicit_sources(
+        sources: Vec<ConfigSource>,
+    ) -> Result<Self, ConfigError> {
+        let mut layers = vec![];
+
+        for source in sources.into_iter() {
+            match source {
+                ConfigSource::Parsed(c) => layers.push(c),
+                ConfigSource::AbsPath(c) => {
+                    // TODO check if it should be trusted
+                    // mercurial/ui.py:427
+                    let data = match std::fs::read(&c) {
+                        Err(_) => continue, // same as the python code
+                        Ok(data) => data,
+                    };
+                    layers.extend(ConfigLayer::parse(&c, &data)?)
+                }
+            }
+        }
+
+        Ok(Config {
+            layers,
+            plain: PlainInfo::empty(),
+        })
+    }
+
+    /// Loads the per-repository config into a new `Config` which is combined
+    /// with `self`.
+    pub(crate) fn combine_with_repo(
+        &self,
+        repo_config_files: &[PathBuf],
+    ) -> Result<Self, ConfigError> {
+        let (cli_layers, other_layers) = self
+            .layers
+            .iter()
+            .cloned()
+            .partition(ConfigLayer::is_from_command_line);
+
+        let mut repo_config = Self {
+            layers: other_layers,
+            plain: PlainInfo::empty(),
+        };
+        for path in repo_config_files {
+            // TODO: check if this file should be trusted:
+            // `mercurial/ui.py:427`
+            repo_config.add_trusted_file(path)?;
+        }
+        repo_config.layers.extend(cli_layers);
+        Ok(repo_config)
+    }
+
+    pub fn apply_plain(&mut self, plain: PlainInfo) {
+        self.plain = plain;
+    }
+
+    fn get_parse<'config, T: 'config>(
+        &'config self,
+        section: &[u8],
+        item: &[u8],
+        expected_type: &'static str,
+        parse: impl Fn(&'config [u8]) -> Option<T>,
+    ) -> Result<Option<T>, ConfigValueParseError> {
+        match self.get_inner(section, item) {
+            Some((layer, v)) => match parse(&v.bytes) {
+                Some(b) => Ok(Some(b)),
+                None => Err(Box::new(ConfigValueParseErrorDetails {
+                    origin: layer.origin.to_owned(),
+                    line: v.line,
+                    value: v.bytes.to_owned(),
+                    section: section.to_owned(),
+                    item: item.to_owned(),
+                    expected_type,
+                })),
+            },
+            None => Ok(None),
+        }
+    }
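
All typed getters below funnel through `get_parse`; a standalone sketch of the same shape (stand-in error type, raw bytes instead of layers), where a missing key stays `Ok(None)` and only an unparsable value becomes an `Err`:

#[derive(Debug, PartialEq)]
struct ParseError(&'static str);

fn get_parse<T>(
    raw: Option<&[u8]>,
    expected_type: &'static str,
    parse: impl Fn(&[u8]) -> Option<T>,
) -> Result<Option<T>, ParseError> {
    match raw {
        Some(bytes) => match parse(bytes) {
            Some(v) => Ok(Some(v)),
            None => Err(ParseError(expected_type)),
        },
        None => Ok(None),
    }
}

fn main() {
    let parse_u32 =
        |b: &[u8]| std::str::from_utf8(b).ok()?.parse::<u32>().ok();
    assert_eq!(
        get_parse(Some(&b"4"[..]), "valid integer", parse_u32),
        Ok(Some(4))
    );
    assert_eq!(get_parse(None, "valid integer", parse_u32), Ok(None));
    assert!(get_parse(Some(&b"1.5"[..]), "valid integer", parse_u32).is_err());
}
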
+
+    /// Returns an `Err` if the first value found is not a valid UTF-8 string.
+    /// Otherwise, returns an `Ok(value)` if found, or `None`.
+    pub fn get_str(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<&str>, ConfigValueParseError> {
+        self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
+            str::from_utf8(value).ok()
+        })
+    }
+
+    /// Returns an `Err` if the first value found is not a valid unsigned
+    /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
+    pub fn get_u32(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<u32>, ConfigValueParseError> {
+        self.get_parse(section, item, "valid integer", |value| {
+            str::from_utf8(value).ok()?.parse().ok()
+        })
+    }
+
+    /// Returns an `Err` if the first value found is not a valid file size
+    /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
+    /// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
+    pub fn get_byte_size(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<u64>, ConfigValueParseError> {
+        self.get_parse(section, item, "byte quantity", values::parse_byte_size)
+    }
+
+    /// Returns an `Err` if the first value found is not a valid boolean.
+    /// Otherwise, returns an `Ok(option)`, where `option` is the boolean if
+    /// found, or `None`.
+    pub fn get_option(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<bool>, ConfigValueParseError> {
+        self.get_parse(section, item, "boolean", values::parse_bool)
+    }
+
+    /// Returns the corresponding boolean in the config. Returns `Ok(false)`
+    /// if the value is not found, an `Err` if it's not a valid boolean.
+    pub fn get_bool(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<bool, ConfigValueParseError> {
+        Ok(self.get_option(section, item)?.unwrap_or(false))
+    }
+
+    /// Returns `true` if the extension is enabled, `false` otherwise
+    pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
+        let value = self.get(b"extensions", extension);
+        match value {
+            Some(c) => !c.starts_with(b"!"),
+            None => false,
+        }
+    }
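
Illustrative inputs for the `!` convention above (a stand-in lookup result instead of `Config::get`): a leading `!` disables the extension, any other value enables it, and an absent key means not enabled.

fn is_enabled(value: Option<&[u8]>) -> bool {
    match value {
        Some(v) => !v.starts_with(b"!"),
        None => false,
    }
}

fn main() {
    assert!(is_enabled(Some(&b""[..])));               // "rebase =" in hgrc
    assert!(is_enabled(Some(&b"/path/ext.py"[..])));   // explicit path
    assert!(!is_enabled(Some(&b"!/path/ext.py"[..]))); // explicitly disabled
    assert!(!is_enabled(None));                        // not mentioned at all
}
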
+
+    /// If there is an `item` value in `section`, parse and return a list of
+    /// byte strings.
+    pub fn get_list(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Option<Vec<Vec<u8>>> {
+        self.get(section, item).map(values::parse_list)
+    }
+
+    /// Returns the raw value bytes of the first one found, or `None`.
+    pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
+        self.get_inner(section, item)
+            .map(|(_, value)| value.bytes.as_ref())
+    }
+
+    /// Returns the raw value bytes of the first one found, or `None`.
+    pub fn get_with_origin(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Option<(&[u8], &ConfigOrigin)> {
+        self.get_inner(section, item)
+            .map(|(layer, value)| (value.bytes.as_ref(), &layer.origin))
+    }
+
+    /// Returns the layer and the value of the first one found, or `None`.
+    fn get_inner(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Option<(&ConfigLayer, &ConfigValue)> {
+        // Filter out the config items that are hidden by [PLAIN].
+        // This differs from python hg where we delete them from the config.
+        let should_ignore = should_ignore(&self.plain, section, item);
+        for layer in self.layers.iter().rev() {
+            if !layer.trusted {
+                continue;
+            }
+            // The [PLAIN] config should not affect the defaults.
+            //
+            // However, PLAIN should also affect the "tweaked" defaults (unless
+            // "tweakdefault" is part of "HGPLAINEXCEPT").
+            //
+            // In practice the tweak-default layer is only added when it is
+            // relevant, so we can safely always take it into
+            // account here.
+            if should_ignore && !(layer.origin == ConfigOrigin::Tweakdefaults)
+            {
+                continue;
+            }
+            if let Some(v) = layer.get(section, item) {
+                return Some((layer, v));
+            }
+        }
+        None
+    }
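
A standalone model (stand-in `Layer` struct, string keys) of the precedence rule implemented above: later layers win, untrusted layers are skipped, and a plain-ignored item is only taken from the tweakdefaults layer:

use std::collections::HashMap;

struct Layer {
    trusted: bool,
    tweakdefaults: bool,
    items: HashMap<&'static str, &'static str>,
}

fn get_inner<'a>(
    layers: &'a [Layer],
    key: &str,
    plain_ignored: bool,
) -> Option<&'a str> {
    for layer in layers.iter().rev() {
        if !layer.trusted || (plain_ignored && !layer.tweakdefaults) {
            continue;
        }
        if let Some(&v) = layer.items.get(key) {
            return Some(v);
        }
    }
    None
}

fn main() {
    let layers = vec![
        Layer {
            trusted: true,
            tweakdefaults: true,
            items: HashMap::from([("ui.statuscopies", "yes")]),
        },
        Layer {
            trusted: true,
            tweakdefaults: false,
            items: HashMap::from([("ui.statuscopies", "no")]),
        },
    ];
    // The later (here: user) layer normally wins...
    assert_eq!(get_inner(&layers, "ui.statuscopies", false), Some("no"));
    // ...but a plain-ignored item only survives from tweakdefaults.
    assert_eq!(get_inner(&layers, "ui.statuscopies", true), Some("yes"));
}
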
+
+    /// Return all keys defined for the given section
+    pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
+        self.layers
+            .iter()
+            .flat_map(|layer| layer.iter_keys(section))
+            .collect()
+    }
+
+    /// Returns whether any key is defined in the given section
+    pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
+        self.layers
+            .iter()
+            .any(|layer| layer.has_non_empty_section(section))
+    }
+
+    /// Yields (key, value) pairs for everything in the given section
+    pub fn iter_section<'a>(
+        &'a self,
+        section: &'a [u8],
+    ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a {
+        // Deduplicate keys redefined in multiple layers
+        let mut keys_already_seen = HashSet::new();
+        let mut key_is_new =
+            move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool {
+                keys_already_seen.insert(key)
+            };
+        // This is similar to `flat_map` + `filter_map`, except with a single
+        // closure that owns `key_is_new` (and therefore the
+        // `keys_already_seen` set):
+        let mut layer_iters = self
+            .layers
+            .iter()
+            .rev()
+            .map(move |layer| layer.iter_section(section))
+            .peekable();
+        std::iter::from_fn(move || loop {
+            if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) {
+                return Some(pair);
+            } else {
+                layer_iters.next();
+            }
+        })
+    }
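
The same first-occurrence-wins deduplication, sketched with a plain `flat_map` + `filter` over stand-in layers (the real code uses `from_fn` + `peekable` instead, so that a single closure can own the seen-set across the per-layer iterators):

use std::collections::HashSet;

fn main() {
    let layers: Vec<Vec<(&str, &str)>> = vec![
        vec![("paths.default", "https://example.org/repo")], // most recent
        vec![("paths.default", "shadowed"), ("ui.editor", "vi")],
    ];
    let mut seen = HashSet::new();
    let merged: Vec<(&str, &str)> = layers
        .iter()
        .flat_map(|layer| layer.iter().copied())
        .filter(|&(key, _)| seen.insert(key)) // keep only unseen keys
        .collect();
    assert_eq!(
        merged,
        [
            ("paths.default", "https://example.org/repo"),
            ("ui.editor", "vi"),
        ]
    );
}
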
+
+    /// Get raw values bytes from all layers (even untrusted ones) in order
+    /// of precedence.
+    #[cfg(test)]
+    fn get_all(&self, section: &[u8], item: &[u8]) -> Vec<&[u8]> {
+        let mut res = vec![];
+        for layer in self.layers.iter().rev() {
+            if let Some(v) = layer.get(section, item) {
+                res.push(v.bytes.as_ref());
+            }
+        }
+        res
+    }
+
+    // a config layer that's introduced by ui.tweakdefaults
+    fn tweakdefaults_layer() -> ConfigLayer {
+        let mut layer = ConfigLayer::new(ConfigOrigin::Tweakdefaults);
+
+        let mut add = |section: &[u8], item: &[u8], value: &[u8]| {
+            layer.add(
+                section[..].into(),
+                item[..].into(),
+                value[..].into(),
+                None,
+            );
+        };
+        // duplication of [tweakrc] from [ui.py]
+        add(b"ui", b"rollback", b"False");
+        add(b"ui", b"statuscopies", b"yes");
+        add(b"ui", b"interface", b"curses");
+        add(b"ui", b"relative-paths", b"yes");
+        add(b"commands", b"grep.all-files", b"True");
+        add(b"commands", b"update.check", b"noconflict");
+        add(b"commands", b"status.verbose", b"True");
+        add(b"commands", b"resolve.explicit-re-merge", b"True");
+        add(b"git", b"git", b"1");
+        add(b"git", b"showfunc", b"1");
+        add(b"git", b"word-diff", b"1");
+        layer
+    }
+
+    // introduce the tweaked defaults as implied by ui.tweakdefaults
+    pub fn tweakdefaults(&mut self) {
+        self.layers.insert(0, Config::tweakdefaults_layer());
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use pretty_assertions::assert_eq;
+    use std::fs::File;
+    use std::io::Write;
+
+    #[test]
+    fn test_include_layer_ordering() {
+        let tmpdir = tempfile::tempdir().unwrap();
+        let tmpdir_path = tmpdir.path();
+        let mut included_file =
+            File::create(&tmpdir_path.join("included.rc")).unwrap();
+
+        included_file.write_all(b"[section]\nitem=value1").unwrap();
+        let base_config_path = tmpdir_path.join("base.rc");
+        let mut config_file = File::create(&base_config_path).unwrap();
+        let data =
+            b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
+              [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
+        config_file.write_all(data).unwrap();
+
+        let sources = vec![ConfigSource::AbsPath(base_config_path)];
+        let config = Config::load_from_explicit_sources(sources)
+            .expect("expected valid config");
+
+        let (_, value) = config.get_inner(b"section", b"item").unwrap();
+        assert_eq!(
+            value,
+            &ConfigValue {
+                bytes: b"value2".to_vec(),
+                line: Some(4)
+            }
+        );
+
+        let value = config.get(b"section", b"item").unwrap();
+        assert_eq!(value, b"value2");
+        assert_eq!(
+            config.get_all(b"section", b"item"),
+            [b"value2", b"value1", b"value0"]
+        );
+
+        assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
+        assert_eq!(
+            config.get_byte_size(b"section2", b"size").unwrap(),
+            Some(1024 + 512)
+        );
+        assert!(config.get_u32(b"section2", b"not-count").is_err());
+        assert!(config.get_byte_size(b"section2", b"not-size").is_err());
+    }
+}
--- a/rust/hg-core/src/config/values.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/config/values.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -30,10 +30,8 @@
         ("b", 1 << 0), // Needs to be last
     ];
     for &(unit, multiplier) in UNITS {
-        // TODO: use `value.strip_suffix(unit)` when we require Rust 1.45+
-        if value.ends_with(unit) {
-            let value_before_unit = &value[..value.len() - unit.len()];
-            let float: f64 = value_before_unit.trim().parse().ok()?;
+        if let Some(value) = value.strip_suffix(unit) {
+            let float: f64 = value.trim().parse().ok()?;
             if float >= 0.0 {
                 return Some((float * multiplier as f64).round() as u64);
             } else {
@@ -202,11 +200,7 @@
 
     // https://docs.python.org/3/library/stdtypes.html?#bytes.isspace
     fn is_space(byte: u8) -> bool {
-        if let b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c' = byte {
-            true
-        } else {
-            false
-        }
+        matches!(byte, b' ' | b'\t' | b'\n' | b'\r' | b'\x0b' | b'\x0c')
     }
 }
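
A simplified standalone model of `parse_byte_size` as refactored above (a subset of the real unit table, `&str` instead of `&[u8]`), showing the `strip_suffix` flow and why the bare `b` unit must come last:

fn parse_byte_size(value: &str) -> Option<u64> {
    const UNITS: &[(&str, u64)] = &[
        ("mb", 1 << 20),
        ("kb", 1 << 10),
        ("b", 1), // must be last: every other unit also ends in "b"
    ];
    let value = value.to_ascii_lowercase();
    for &(unit, multiplier) in UNITS {
        if let Some(prefix) = value.strip_suffix(unit) {
            let float: f64 = prefix.trim().parse().ok()?;
            return if float >= 0.0 {
                Some((float * multiplier as f64).round() as u64)
            } else {
                None
            };
        }
    }
    value.trim().parse().ok() // no unit: plain number of bytes
}

fn main() {
    assert_eq!(parse_byte_size("1.5 KB"), Some(1024 + 512));
    assert_eq!(parse_byte_size("30"), Some(30));
    assert_eq!(parse_byte_size("1 ub"), None); // unknown unit
}
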
 
--- a/rust/hg-core/src/copy_tracing.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/copy_tracing.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -59,7 +59,7 @@
         Self {
             rev,
             path: winner.path,
-            overwritten: overwritten,
+            overwritten,
         }
     }
 
@@ -489,7 +489,7 @@
                         if cs1 == cs2 {
                             cs1.mark_delete(current_rev);
                         } else {
-                            cs1.mark_delete_with_pair(current_rev, &cs2);
+                            cs1.mark_delete_with_pair(current_rev, cs2);
                         }
                         e2.insert(cs1.clone());
                     }
@@ -513,15 +513,14 @@
 ) {
     let dest = path_map.tokenize(path_dest);
     let source = path_map.tokenize(path_source);
-    let entry;
-    if let Some(v) = base_copies.get(&source) {
-        entry = match &v.path {
+    let entry = if let Some(v) = base_copies.get(&source) {
+        match &v.path {
             Some(path) => Some((*(path)).to_owned()),
             None => Some(source.to_owned()),
         }
     } else {
-        entry = Some(source.to_owned());
-    }
+        Some(source.to_owned())
+    };
     // Each new entry is introduced by the children, we
     // record this information as we will need it to take
     // the right decision when merging conflicting copy
@@ -563,17 +562,15 @@
                 MergePick::Major | MergePick::Any => (src_major, src_minor),
                 MergePick::Minor => (src_minor, src_major),
             };
-            MergeResult::UseNewValue(CopySource::new_from_merge(
+            MergeResult::New(CopySource::new_from_merge(
                 current_merge,
                 winner,
                 loser,
             ))
         } else {
             match pick {
-                MergePick::Any | MergePick::Major => {
-                    MergeResult::UseRightValue
-                }
-                MergePick::Minor => MergeResult::UseLeftValue,
+                MergePick::Any | MergePick::Major => MergeResult::Right,
+                MergePick::Minor => MergeResult::Left,
             }
         }
     })
@@ -613,7 +610,7 @@
         // eventually.
         (MergePick::Minor, true)
     } else if src_major.path == src_minor.path {
-        debug_assert!(src_major.rev != src_major.rev);
+        debug_assert!(src_major.rev != src_minor.rev);
         // we have the same value, but from other source;
         if src_major.is_overwritten_by(src_minor) {
             (MergePick::Minor, false)
@@ -623,7 +620,7 @@
             (MergePick::Any, true)
         }
     } else {
-        debug_assert!(src_major.rev != src_major.rev);
+        debug_assert!(src_major.rev != src_minor.rev);
         let action = merge_case_for_dest();
         if src_minor.path.is_some()
             && src_major.path.is_none()
--- a/rust/hg-core/src/copy_tracing/tests.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/copy_tracing/tests.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -118,7 +118,7 @@
     // keys to copy source values. Note: the arrows for map literal syntax
     // point **backwards** compared to the logical direction of copy!
 
-    use crate::NULL_REVISION as NULL;
+    use crate::revlog::NULL_REVISION as NULL;
     use Action::*;
     use MergeCase::*;
 
--- a/rust/hg-core/src/dagops.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dagops.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -181,7 +181,7 @@
         let mut revs: HashSet<Revision> = revs.iter().cloned().collect();
         retain_heads(graph, &mut revs)?;
         let mut as_vec: Vec<Revision> = revs.iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         Ok(as_vec)
     }
 
@@ -206,7 +206,7 @@
     ) -> Result<Vec<Revision>, GraphError> {
         let heads = heads(graph, revs.iter())?;
         let mut as_vec: Vec<Revision> = heads.iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         Ok(as_vec)
     }
 
@@ -231,7 +231,7 @@
     ) -> Result<Vec<Revision>, GraphError> {
         let set: HashSet<_> = revs.iter().cloned().collect();
         let mut as_vec = roots(graph, &set)?;
-        as_vec.sort();
+        as_vec.sort_unstable();
         Ok(as_vec)
     }
 
--- a/rust/hg-core/src/dirstate.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -32,7 +32,7 @@
     };
 
     pub fn is_merge(&self) -> bool {
-        return !(self.p2 == NULL_NODE);
+        !(self.p2 == NULL_NODE)
     }
 }
 
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -232,7 +232,7 @@
     #[test]
     fn test_delete_path_empty_path() {
         let mut map =
-            DirsMultiset::from_manifest(&vec![HgPathBuf::new()]).unwrap();
+            DirsMultiset::from_manifest(&[HgPathBuf::new()]).unwrap();
         let path = HgPath::new(b"");
         assert_eq!(Ok(()), map.delete_path(path));
         assert_eq!(
--- a/rust/hg-core/src/dirstate/entry.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate/entry.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,7 +1,6 @@
 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
 use crate::errors::HgError;
 use bitflags::bitflags;
-use std::convert::{TryFrom, TryInto};
 use std::fs;
 use std::io;
 use std::time::{SystemTime, UNIX_EPOCH};
@@ -181,11 +180,7 @@
         if self.truncated_seconds != other.truncated_seconds {
             false
         } else if self.nanoseconds == 0 || other.nanoseconds == 0 {
-            if self.second_ambiguous {
-                false
-            } else {
-                true
-            }
+            !self.second_ambiguous
         } else {
             self.nanoseconds == other.nanoseconds
         }
@@ -423,6 +418,8 @@
     }
 
     pub fn maybe_clean(&self) -> bool {
+        #[allow(clippy::if_same_then_else)]
+        #[allow(clippy::needless_bool)]
         if !self.flags.contains(Flags::WDIR_TRACKED) {
             false
         } else if !self.flags.contains(Flags::P1_TRACKED) {
@@ -512,6 +509,8 @@
             // TODO: return an Option instead?
             panic!("Accessing v1_mtime of an untracked DirstateEntry")
         }
+
+        #[allow(clippy::if_same_then_else)]
         if self.removed() {
             0
         } else if self.flags.contains(Flags::P2_INFO) {
@@ -703,9 +702,9 @@
     }
 }
 
-impl Into<u8> for EntryState {
-    fn into(self) -> u8 {
-        match self {
+impl From<EntryState> for u8 {
+    fn from(val: EntryState) -> Self {
+        match val {
             EntryState::Normal => b'n',
             EntryState::Added => b'a',
             EntryState::Removed => b'r',
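
The `From`-over-`Into` direction above is the usual clippy guidance (`from_over_into`): the standard library's blanket impl derives `Into` from `From` automatically. A minimal stand-in illustration:

enum State {
    Added,
    Removed,
}

impl From<State> for u8 {
    fn from(val: State) -> Self {
        match val {
            State::Added => b'a',
            State::Removed => b'r',
        }
    }
}

fn main() {
    // `.into()` is provided for free by `impl From<State> for u8`.
    let byte: u8 = State::Added.into();
    assert_eq!(byte, b'a');
    assert_eq!(u8::from(State::Removed), b'r');
}
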
--- a/rust/hg-core/src/dirstate/parsers.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate/parsers.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -8,8 +8,6 @@
 use crate::{dirstate::EntryState, DirstateEntry, DirstateParents};
 use byteorder::{BigEndian, WriteBytesExt};
 use bytes_cast::{unaligned, BytesCast};
-use micro_timer::timed;
-use std::convert::TryFrom;
 
 /// Parents are stored in the dirstate as byte hashes.
 pub const PARENT_SIZE: usize = 20;
@@ -30,7 +28,7 @@
     Ok(parents)
 }
 
-#[timed]
+#[logging_timer::time("trace")]
 pub fn parse_dirstate(contents: &[u8]) -> Result<ParseResult, HgError> {
     let mut copies = Vec::new();
     let mut entries = Vec::new();
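
A hedged usage sketch of the replacement attribute (assuming the `logging_timer` crate together with a `log` backend such as `env_logger`; not runnable without those dependencies):

#[logging_timer::time("trace")] // logs the function's wall time at trace level
fn parse_dirstate_stub(contents: &[u8]) -> usize {
    contents.len()
}

fn main() {
    env_logger::init(); // hypothetical backend; set RUST_LOG=trace to see output
    let _ = parse_dirstate_stub(b"fake dirstate bytes");
}
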
--- a/rust/hg-core/src/dirstate_tree/dirstate_map.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,5 +1,4 @@
 use bytes_cast::BytesCast;
-use micro_timer::timed;
 use std::borrow::Cow;
 use std::path::PathBuf;
 
@@ -16,6 +15,7 @@
 use crate::dirstate::StateMapIter;
 use crate::dirstate::TruncatedTimestamp;
 use crate::matchers::Matcher;
+use crate::utils::filter_map_results;
 use crate::utils::hg_path::{HgPath, HgPathBuf};
 use crate::DirstateEntry;
 use crate::DirstateError;
@@ -346,9 +346,7 @@
         on_disk: &'on_disk [u8],
     ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
         match self {
-            NodeRef::InMemory(_path, node) => {
-                Ok(node.copy_source.as_ref().map(|s| &**s))
-            }
+            NodeRef::InMemory(_path, node) => Ok(node.copy_source.as_deref()),
             NodeRef::OnDisk(node) => node.copy_source(on_disk),
         }
     }
@@ -366,9 +364,9 @@
                     Cow::Owned(in_memory) => BorrowedPath::InMemory(in_memory),
                 })
             }
-            NodeRef::OnDisk(node) => node
-                .copy_source(on_disk)?
-                .map(|source| BorrowedPath::OnDisk(source)),
+            NodeRef::OnDisk(node) => {
+                node.copy_source(on_disk)?.map(BorrowedPath::OnDisk)
+            }
         })
     }
 
@@ -444,10 +442,7 @@
 
 impl NodeData {
     fn has_entry(&self) -> bool {
-        match self {
-            NodeData::Entry(_) => true,
-            _ => false,
-        }
+        matches!(self, NodeData::Entry(_))
     }
 
     fn as_entry(&self) -> Option<&DirstateEntry> {
@@ -482,7 +477,7 @@
         }
     }
 
-    #[timed]
+    #[logging_timer::time("trace")]
     pub fn new_v2(
         on_disk: &'on_disk [u8],
         data_size: usize,
@@ -497,7 +492,7 @@
         }
     }
 
-    #[timed]
+    #[logging_timer::time("trace")]
     pub fn new_v1(
         on_disk: &'on_disk [u8],
         identity: Option<u64>,
@@ -541,7 +536,7 @@
                 Ok(())
             },
         )?;
-        let parents = Some(parents.clone());
+        let parents = Some(*parents);
         map.identity = identity;
 
         Ok((map, parents))
@@ -695,6 +690,7 @@
         }
     }
 
+    #[allow(clippy::too_many_arguments)]
     fn reset_state(
         &mut self,
         filename: &HgPath,
@@ -720,10 +716,8 @@
                         .checked_sub(1)
                         .expect("tracked count to be >= 0");
                 }
-            } else {
-                if wc_tracked {
-                    ancestor.tracked_descendants_count += 1;
-                }
+            } else if wc_tracked {
+                ancestor.tracked_descendants_count += 1;
             }
         })?;
 
@@ -773,7 +767,7 @@
             ancestor.tracked_descendants_count += tracked_count_increment;
         })?;
         if let Some(old_entry) = old_entry_opt {
-            let mut e = old_entry.clone();
+            let mut e = old_entry;
             if e.tracked() {
                 // XXX
                 // This is probably overkill for most cases, but we need this to
@@ -814,7 +808,7 @@
                     .expect("tracked_descendants_count should be >= 0");
             })?
             .expect("node should exist");
-        let mut new_entry = old_entry.clone();
+        let mut new_entry = old_entry;
         new_entry.set_untracked();
         node.data = NodeData::Entry(new_entry);
         Ok(())
@@ -842,7 +836,7 @@
                 }
             })?
             .expect("node should exist");
-        let mut new_entry = old_entry.clone();
+        let mut new_entry = old_entry;
         new_entry.set_clean(mode, size, mtime);
         node.data = NodeData::Entry(new_entry);
         Ok(())
@@ -951,7 +945,7 @@
         })
     }
 
-    fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
+    fn count_dropped_path(unreachable_bytes: &mut u32, path: Cow<HgPath>) {
         if let Cow::Borrowed(path) = path {
             *unreachable_bytes += path.len() as u32
         }
@@ -962,25 +956,7 @@
     }
 }
 
-/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
-///
-/// The callback is only called for incoming `Ok` values. Errors are passed
-/// through as-is. In order to let it use the `?` operator the callback is
-/// expected to return a `Result` of `Option`, instead of an `Option` of
-/// `Result`.
-fn filter_map_results<'a, I, F, A, B, E>(
-    iter: I,
-    f: F,
-) -> impl Iterator<Item = Result<B, E>> + 'a
-where
-    I: Iterator<Item = Result<A, E>> + 'a,
-    F: Fn(A) -> Result<Option<B>, E> + 'a,
-{
-    iter.filter_map(move |result| match result {
-        Ok(node) => f(node).transpose(),
-        Err(e) => Some(Err(e)),
-    })
-}
+type DebugDirstateTuple<'a> = (&'a HgPath, (u8, i32, i32, i32));
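
The removed helper now lives in `crate::utils` (see the new import near the top of this file); a standalone copy of the adapter with a small usage check:

fn filter_map_results<I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>>
where
    I: Iterator<Item = Result<A, E>>,
    F: Fn(A) -> Result<Option<B>, E>,
{
    iter.filter_map(move |result| match result {
        Ok(node) => f(node).transpose(),
        Err(e) => Some(Err(e)),
    })
}

fn main() {
    let input = vec![Ok(1), Ok(2), Err("bad"), Ok(3)];
    let doubled_evens: Vec<Result<i32, &str>> =
        filter_map_results(input.into_iter(), |n| {
            // `?` would also work here, thanks to the `Result` return type.
            Ok(if n % 2 == 0 { Some(n * 2) } else { None })
        })
        .collect();
    assert_eq!(doubled_evens, [Ok(4), Err("bad")]);
}
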
 
 impl OwningDirstateMap {
     pub fn clear(&mut self) {
@@ -1167,7 +1143,10 @@
                 }
                 let mut had_copy_source = false;
                 if let Some(source) = &node.copy_source {
-                    DirstateMap::count_dropped_path(unreachable_bytes, source);
+                    DirstateMap::count_dropped_path(
+                        unreachable_bytes,
+                        Cow::Borrowed(source),
+                    );
                     had_copy_source = true;
                     node.copy_source = None
                 }
@@ -1187,7 +1166,7 @@
                     nodes.remove_entry(first_path_component).unwrap();
                 DirstateMap::count_dropped_path(
                     unreachable_bytes,
-                    key.full_path(),
+                    Cow::Borrowed(key.full_path()),
                 )
             }
             Ok(Some((dropped, remove)))
@@ -1251,7 +1230,7 @@
         })
     }
 
-    #[timed]
+    #[logging_timer::time("trace")]
     pub fn pack_v1(
         &self,
         parents: DirstateParents,
@@ -1291,7 +1270,7 @@
     /// appended to the existing data file whose content is at
     /// `map.on_disk` (true), instead of written to a new data file
     /// (false), and the previous size of data on disk.
-    #[timed]
+    #[logging_timer::time("trace")]
     pub fn pack_v2(
         &self,
         write_mode: DirstateMapWriteMode,
@@ -1386,7 +1365,10 @@
                     *count = count
                         .checked_sub(1)
                         .expect("nodes_with_copy_source_count should be >= 0");
-                    DirstateMap::count_dropped_path(unreachable_bytes, source);
+                    DirstateMap::count_dropped_path(
+                        unreachable_bytes,
+                        Cow::Borrowed(source),
+                    );
                 }
                 node.copy_source.take().map(Cow::into_owned)
             }))
@@ -1399,7 +1381,7 @@
         value: &HgPath,
     ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
         self.with_dmap_mut(|map| {
-            let node = map.get_or_insert_node(&key, |_ancestor| {})?;
+            let node = map.get_or_insert_node(key, |_ancestor| {})?;
             let had_copy_source = node.copy_source.is_none();
             let old = node
                 .copy_source
@@ -1417,6 +1399,10 @@
         map.nodes_with_entry_count as usize
     }
 
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
     pub fn contains_key(
         &self,
         key: &HgPath,
@@ -1510,12 +1496,8 @@
         &self,
         all: bool,
     ) -> Box<
-        dyn Iterator<
-                Item = Result<
-                    (&HgPath, (u8, i32, i32, i32)),
-                    DirstateV2ParseError,
-                >,
-            > + Send
+        dyn Iterator<Item = Result<DebugDirstateTuple, DirstateV2ParseError>>
+            + Send
             + '_,
     > {
         let map = self.get_map();
@@ -1901,11 +1883,8 @@
         map.set_untracked(p(b"some/nested/removed"))?;
         assert_eq!(map.get_map().unreachable_bytes, 0);
 
-        match map.get_map().root {
-            ChildNodes::InMemory(_) => {
-                panic!("root should not have been mutated")
-            }
-            _ => (),
+        if let ChildNodes::InMemory(_) = map.get_map().root {
+            panic!("root should not have been mutated")
         }
         // We haven't mutated enough (nothing, actually), we should still be in
         // the append strategy
@@ -1916,9 +1895,8 @@
         let unreachable_bytes = map.get_map().unreachable_bytes;
         assert!(unreachable_bytes > 0);
 
-        match map.get_map().root {
-            ChildNodes::OnDisk(_) => panic!("root should have been mutated"),
-            _ => (),
+        if let ChildNodes::OnDisk(_) = map.get_map().root {
+            panic!("root should have been mutated")
         }
 
         // This should not mutate the structure either, since `root` has
@@ -1926,22 +1904,20 @@
         map.set_untracked(p(b"merged"))?;
         assert_eq!(map.get_map().unreachable_bytes, unreachable_bytes);
 
-        match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
-            NodeRef::InMemory(_, _) => {
-                panic!("'other/added_with_p2' should not have been mutated")
-            }
-            _ => (),
+        if let NodeRef::InMemory(_, _) =
+            map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap()
+        {
+            panic!("'other/added_with_p2' should not have been mutated")
         }
         // But this should, since it's in a different path
         // than `<root>some/nested/add`
         map.set_untracked(p(b"other/added_with_p2"))?;
         assert!(map.get_map().unreachable_bytes > unreachable_bytes);
 
-        match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
-            NodeRef::OnDisk(_) => {
-                panic!("'other/added_with_p2' should have been mutated")
-            }
-            _ => (),
+        if let NodeRef::OnDisk(_) =
+            map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap()
+        {
+            panic!("'other/added_with_p2' should have been mutated")
         }
 
         // We have rewritten most of the tree, we should create a new file
--- a/rust/hg-core/src/dirstate_tree/on_disk.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/on_disk.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -19,7 +19,6 @@
 use format_bytes::format_bytes;
 use rand::Rng;
 use std::borrow::Cow;
-use std::convert::{TryFrom, TryInto};
 use std::fmt::Write;
 
 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
@@ -249,11 +248,9 @@
     pub fn parents(&self) -> DirstateParents {
         use crate::Node;
         let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
-            .unwrap()
-            .clone();
+            .unwrap();
         let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
-            .unwrap()
-            .clone();
+            .unwrap();
         DirstateParents { p1, p2 }
     }
 
@@ -330,7 +327,7 @@
         read_hg_path(on_disk, self.full_path)
     }
 
-    pub(super) fn base_name_start<'on_disk>(
+    pub(super) fn base_name_start(
         &self,
     ) -> Result<usize, DirstateV2ParseError> {
         let start = self.base_name_start.get();
@@ -363,7 +360,7 @@
         ))
     }
 
-    pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
+    pub(super) fn has_copy_source(&self) -> bool {
         self.copy_source.start.get() != 0
     }
 
@@ -422,12 +419,12 @@
         } else {
             libc::S_IFREG
         };
-        let permisions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
+        let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
             0o755
         } else {
             0o644
         };
-        (file_type | permisions).into()
+        file_type | permissions
     }
 
     fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
@@ -609,32 +606,6 @@
         .map(|(slice, _rest)| slice)
 }
 
-pub(crate) fn for_each_tracked_path<'on_disk>(
-    on_disk: &'on_disk [u8],
-    metadata: &[u8],
-    mut f: impl FnMut(&'on_disk HgPath),
-) -> Result<(), DirstateV2ParseError> {
-    let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
-        DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
-    })?;
-    fn recur<'on_disk>(
-        on_disk: &'on_disk [u8],
-        nodes: ChildNodes,
-        f: &mut impl FnMut(&'on_disk HgPath),
-    ) -> Result<(), DirstateV2ParseError> {
-        for node in read_nodes(on_disk, nodes)? {
-            if let Some(entry) = node.entry()? {
-                if entry.tracked() {
-                    f(node.full_path(on_disk)?)
-                }
-            }
-            recur(on_disk, node.children, f)?
-        }
-        Ok(())
-    }
-    recur(on_disk, meta.root_nodes, &mut f)
-}
-
 /// Returns new data and metadata, together with whether that data should be
 /// appended to the existing data file whose content is at
 /// `dirstate_map.on_disk` (true), instead of written to a new data file
--- a/rust/hg-core/src/dirstate_tree/owning.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/owning.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -24,7 +24,7 @@
 
         OwningDirstateMapBuilder {
             on_disk,
-            map_builder: |bytes| DirstateMap::empty(&bytes),
+            map_builder: |bytes| DirstateMap::empty(bytes),
         }
         .build()
     }
@@ -43,7 +43,7 @@
             OwningDirstateMapTryBuilder {
                 on_disk,
                 map_builder: |bytes| {
-                    DirstateMap::new_v1(&bytes, identity).map(|(dmap, p)| {
+                    DirstateMap::new_v1(bytes, identity).map(|(dmap, p)| {
                         parents = p.unwrap_or(DirstateParents::NULL);
                         dmap
                     })
@@ -69,9 +69,7 @@
         OwningDirstateMapTryBuilder {
             on_disk,
             map_builder: |bytes| {
-                DirstateMap::new_v2(
-                    &bytes, data_size, metadata, uuid, identity,
-                )
+                DirstateMap::new_v2(bytes, data_size, metadata, uuid, identity)
             },
         }
         .try_build()
--- a/rust/hg-core/src/dirstate_tree/status.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/status.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -15,12 +15,10 @@
 use crate::utils::hg_path::HgPath;
 use crate::BadMatch;
 use crate::DirstateStatus;
-use crate::HgPathBuf;
 use crate::HgPathCow;
 use crate::PatternFileWarning;
 use crate::StatusError;
 use crate::StatusOptions;
-use micro_timer::timed;
 use once_cell::sync::OnceCell;
 use rayon::prelude::*;
 use sha1::{Digest, Sha1};
@@ -40,7 +38,7 @@
 /// and its use of `itertools::merge_join_by`. When reaching a path that only
 /// exists in one of the two trees, depending on information requested by
 /// `options` we may need to traverse the remaining subtree.
-#[timed]
+#[logging_timer::time("trace")]
 pub fn status<'dirstate>(
     dmap: &'dirstate mut DirstateMap,
     matcher: &(dyn Matcher + Sync),
@@ -147,7 +145,6 @@
     let hg_path = &BorrowedPath::OnDisk(HgPath::new(""));
     let has_ignored_ancestor = HasIgnoredAncestor::create(None, hg_path);
     let root_cached_mtime = None;
-    let root_dir_metadata = None;
     // If the path we have for the repository root is a symlink, do follow it.
     // (As opposed to symlinks within the working directory which are not
     // followed, using `std::fs::symlink_metadata`.)
@@ -155,8 +152,12 @@
         &has_ignored_ancestor,
         dmap.root.as_ref(),
         hg_path,
-        &root_dir,
-        root_dir_metadata,
+        &DirEntry {
+            hg_path: Cow::Borrowed(HgPath::new(b"")),
+            fs_path: Cow::Borrowed(root_dir),
+            symlink_metadata: None,
+            file_type: FakeFileType::Directory,
+        },
         root_cached_mtime,
         is_at_repo_root,
     )?;
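
A standalone sketch of the pattern this hunk introduces (a stand-in `DirEntry`; the real one also carries an `hg_path` and a `FakeFileType`): the entry caches an optional pre-fetched `Metadata` and falls back to a fresh `symlink_metadata` call when none was recorded, as for the repository root here:

use std::fs::Metadata;
use std::path::PathBuf;

struct DirEntry {
    fs_path: PathBuf,
    symlink_metadata: Option<Metadata>,
}

impl DirEntry {
    fn symlink_metadata(&self) -> std::io::Result<Metadata> {
        match &self.symlink_metadata {
            Some(meta) => Ok(meta.clone()), // already fetched during readdir
            None => std::fs::symlink_metadata(&self.fs_path), // e.g. the root
        }
    }
}

fn main() -> std::io::Result<()> {
    let root = DirEntry {
        fs_path: PathBuf::from("."),
        symlink_metadata: None,
    };
    println!("root is_dir: {}", root.symlink_metadata()?.is_dir());
    Ok(())
}
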
@@ -244,7 +245,7 @@
             None => false,
             Some(parent) => {
                 *(parent.cache.get_or_init(|| {
-                    parent.force(ignore_fn) || ignore_fn(&self.path)
+                    parent.force(ignore_fn) || ignore_fn(self.path)
                 }))
             }
         }
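
The `get_or_init` call above memoizes the ancestor walk; a minimal sketch of the pattern, assuming `once_cell::sync::OnceCell` (already imported by this file):

use once_cell::sync::OnceCell;

fn main() {
    let cache: OnceCell<bool> = OnceCell::new();
    let mut runs = 0;
    let a = *cache.get_or_init(|| {
        runs += 1; // an expensive computation stands in here
        true
    });
    let b = *cache.get_or_init(|| {
        runs += 1; // never reached: the first result is reused
        false
    });
    assert_eq!((a, b, runs), (true, true, 1));
}
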
@@ -340,7 +341,7 @@
     /// need to call `read_dir`.
     fn can_skip_fs_readdir(
         &self,
-        directory_metadata: Option<&std::fs::Metadata>,
+        directory_entry: &DirEntry,
         cached_directory_mtime: Option<TruncatedTimestamp>,
     ) -> bool {
         if !self.options.list_unknown && !self.options.list_ignored {
@@ -356,9 +357,9 @@
                 // The dirstate contains a cached mtime for this directory, set
                 // by a previous run of the `status` algorithm which found this
                 // directory eligible for `read_dir` caching.
-                if let Some(meta) = directory_metadata {
+                if let Ok(meta) = directory_entry.symlink_metadata() {
                     if cached_mtime
-                        .likely_equal_to_mtime_of(meta)
+                        .likely_equal_to_mtime_of(&meta)
                         .unwrap_or(false)
                     {
                         // The mtime of that directory has not changed
@@ -379,33 +380,48 @@
         has_ignored_ancestor: &'ancestor HasIgnoredAncestor<'ancestor>,
         dirstate_nodes: ChildNodesRef<'tree, 'on_disk>,
         directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
-        directory_fs_path: &Path,
-        directory_metadata: Option<&std::fs::Metadata>,
+        directory_entry: &DirEntry,
         cached_directory_mtime: Option<TruncatedTimestamp>,
         is_at_repo_root: bool,
     ) -> Result<bool, DirstateV2ParseError> {
-        if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
-        {
+        if self.can_skip_fs_readdir(directory_entry, cached_directory_mtime) {
             dirstate_nodes
                 .par_iter()
                 .map(|dirstate_node| {
-                    let fs_path = directory_fs_path.join(get_path_from_bytes(
+                    let fs_path = &directory_entry.fs_path;
+                    let fs_path = fs_path.join(get_path_from_bytes(
                         dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
                     ));
                     match std::fs::symlink_metadata(&fs_path) {
-                        Ok(fs_metadata) => self.traverse_fs_and_dirstate(
-                            &fs_path,
-                            &fs_metadata,
-                            dirstate_node,
-                            has_ignored_ancestor,
-                        ),
+                        Ok(fs_metadata) => {
+                            let file_type =
+                                match fs_metadata.file_type().try_into() {
+                                    Ok(file_type) => file_type,
+                                    Err(_) => return Ok(()),
+                                };
+                            let entry = DirEntry {
+                                hg_path: Cow::Borrowed(
+                                    dirstate_node
+                                        .full_path(self.dmap.on_disk)?,
+                                ),
+                                fs_path: Cow::Borrowed(&fs_path),
+                                symlink_metadata: Some(fs_metadata),
+                                file_type,
+                            };
+                            self.traverse_fs_and_dirstate(
+                                &entry,
+                                dirstate_node,
+                                has_ignored_ancestor,
+                            )
+                        }
                         Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                             self.traverse_dirstate_only(dirstate_node)
                         }
                         Err(error) => {
                             let hg_path =
                                 dirstate_node.full_path(self.dmap.on_disk)?;
-                            Ok(self.io_error(error, hg_path))
+                            self.io_error(error, hg_path);
+                            Ok(())
                         }
                     }
                 })
@@ -419,7 +435,7 @@
 
         let mut fs_entries = if let Ok(entries) = self.read_dir(
             directory_hg_path,
-            directory_fs_path,
+            &directory_entry.fs_path,
             is_at_repo_root,
         ) {
             entries
@@ -435,7 +451,7 @@
         let dirstate_nodes = dirstate_nodes.sorted();
         // `sort_unstable_by_key` doesn’t allow keys borrowing from the value:
         // https://github.com/rust-lang/rust/issues/34162
-        fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
+        fs_entries.sort_unstable_by(|e1, e2| e1.hg_path.cmp(&e2.hg_path));
 
         // Propagate here any error that would happen inside the comparison
         // callback below
@@ -451,35 +467,31 @@
                 dirstate_node
                     .base_name(self.dmap.on_disk)
                     .unwrap()
-                    .cmp(&fs_entry.base_name)
+                    .cmp(&fs_entry.hg_path)
             },
         )
         .par_bridge()
         .map(|pair| {
             use itertools::EitherOrBoth::*;
-            let has_dirstate_node_or_is_ignored;
-            match pair {
+            let has_dirstate_node_or_is_ignored = match pair {
                 Both(dirstate_node, fs_entry) => {
                     self.traverse_fs_and_dirstate(
-                        &fs_entry.full_path,
-                        &fs_entry.metadata,
+                        fs_entry,
                         dirstate_node,
                         has_ignored_ancestor,
                     )?;
-                    has_dirstate_node_or_is_ignored = true
+                    true
                 }
                 Left(dirstate_node) => {
                     self.traverse_dirstate_only(dirstate_node)?;
-                    has_dirstate_node_or_is_ignored = true;
+                    true
                 }
-                Right(fs_entry) => {
-                    has_dirstate_node_or_is_ignored = self.traverse_fs_only(
-                        has_ignored_ancestor.force(&self.ignore_fn),
-                        directory_hg_path,
-                        fs_entry,
-                    )
-                }
-            }
+                Right(fs_entry) => self.traverse_fs_only(
+                    has_ignored_ancestor.force(&self.ignore_fn),
+                    directory_hg_path,
+                    fs_entry,
+                ),
+            };
             Ok(has_dirstate_node_or_is_ignored)
         })
         .try_reduce(|| true, |a, b| Ok(a && b))
@@ -487,23 +499,21 @@
 
     fn traverse_fs_and_dirstate<'ancestor>(
         &self,
-        fs_path: &Path,
-        fs_metadata: &std::fs::Metadata,
+        fs_entry: &DirEntry,
         dirstate_node: NodeRef<'tree, 'on_disk>,
         has_ignored_ancestor: &'ancestor HasIgnoredAncestor<'ancestor>,
     ) -> Result<(), DirstateV2ParseError> {
         let outdated_dircache =
             self.check_for_outdated_directory_cache(&dirstate_node)?;
         let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
-        let file_type = fs_metadata.file_type();
-        let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+        let file_or_symlink = fs_entry.is_file() || fs_entry.is_symlink();
         if !file_or_symlink {
             // If we previously had a file here, it was removed (with
             // `hg rm` or similar) or deleted before it could be
             // replaced by a directory or something else.
             self.mark_removed_or_deleted_if_file(&dirstate_node)?;
         }
-        if file_type.is_dir() {
+        if fs_entry.is_dir() {
             if self.options.collect_traversed_dirs {
                 self.outcome
                     .lock()
@@ -512,7 +522,7 @@
                     .push(hg_path.detach_from_tree())
             }
             let is_ignored = HasIgnoredAncestor::create(
-                Some(&has_ignored_ancestor),
+                Some(has_ignored_ancestor),
                 hg_path,
             );
             let is_at_repo_root = false;
@@ -521,26 +531,25 @@
                     &is_ignored,
                     dirstate_node.children(self.dmap.on_disk)?,
                     hg_path,
-                    fs_path,
-                    Some(fs_metadata),
+                    fs_entry,
                     dirstate_node.cached_directory_mtime()?,
                     is_at_repo_root,
                 )?;
             self.maybe_save_directory_mtime(
                 children_all_have_dirstate_node_or_are_ignored,
-                fs_metadata,
+                fs_entry,
                 dirstate_node,
                 outdated_dircache,
             )?
         } else {
-            if file_or_symlink && self.matcher.matches(&hg_path) {
+            if file_or_symlink && self.matcher.matches(hg_path) {
                 if let Some(entry) = dirstate_node.entry()? {
                     if !entry.any_tracked() {
                         // Forward-compat if we start tracking unknown/ignored
                         // files for caching reasons
                         self.mark_unknown_or_ignored(
                             has_ignored_ancestor.force(&self.ignore_fn),
-                            &hg_path,
+                            hg_path,
                         );
                     }
                     if entry.added() {
@@ -550,7 +559,7 @@
                     } else if entry.modified() {
                         self.push_outcome(Outcome::Modified, &dirstate_node)?;
                     } else {
-                        self.handle_normal_file(&dirstate_node, fs_metadata)?;
+                        self.handle_normal_file(&dirstate_node, fs_entry)?;
                     }
                 } else {
                     // `node.entry.is_none()` indicates a "directory"
@@ -578,7 +587,7 @@
     fn maybe_save_directory_mtime(
         &self,
         children_all_have_dirstate_node_or_are_ignored: bool,
-        directory_metadata: &std::fs::Metadata,
+        directory_entry: &DirEntry,
         dirstate_node: NodeRef<'tree, 'on_disk>,
         outdated_directory_cache: bool,
     ) -> Result<(), DirstateV2ParseError> {
@@ -605,14 +614,17 @@
         // resolution based on the filesystem (for example ext3
         // only stores integer seconds), kernel (see
         // https://stackoverflow.com/a/14393315/1162888), etc.
-        let directory_mtime = if let Ok(option) =
-            TruncatedTimestamp::for_reliable_mtime_of(
-                directory_metadata,
-                status_start,
-            ) {
-            if let Some(directory_mtime) = option {
-                directory_mtime
-            } else {
+        let metadata = match directory_entry.symlink_metadata() {
+            Ok(meta) => meta,
+            Err(_) => return Ok(()),
+        };
+
+        let directory_mtime = match TruncatedTimestamp::for_reliable_mtime_of(
+            &metadata,
+            status_start,
+        ) {
+            Ok(Some(directory_mtime)) => directory_mtime,
+            Ok(None) => {
                 // The directory was modified too recently,
                 // don’t cache its `read_dir` results.
                 //
@@ -630,9 +642,10 @@
                 // by the same script.
                 return Ok(());
             }
-        } else {
-            // OS/libc does not support mtime?
-            return Ok(());
+            Err(_) => {
+                // OS/libc does not support mtime?
+                return Ok(());
+            }
         };
         // We’ve observed (through `status_start`) that time has
         // “progressed” since `directory_mtime`, so any further
@@ -671,18 +684,23 @@
     fn handle_normal_file(
         &self,
         dirstate_node: &NodeRef<'tree, 'on_disk>,
-        fs_metadata: &std::fs::Metadata,
+        fs_entry: &DirEntry,
     ) -> Result<(), DirstateV2ParseError> {
         // Keep the low 31 bits
         fn truncate_u64(value: u64) -> i32 {
             (value & 0x7FFF_FFFF) as i32
         }
 
+        let fs_metadata = match fs_entry.symlink_metadata() {
+            Ok(meta) => meta,
+            Err(_) => return Ok(()),
+        };
+
         let entry = dirstate_node
             .entry()?
             .expect("handle_normal_file called with entry-less node");
         let mode_changed =
-            || self.options.check_exec && entry.mode_changed(fs_metadata);
+            || self.options.check_exec && entry.mode_changed(&fs_metadata);
         let size = entry.size();
         let size_changed = size != truncate_u64(fs_metadata.len());
         if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() {
@@ -695,19 +713,20 @@
         {
             self.push_outcome(Outcome::Modified, dirstate_node)?
         } else {
-            let mtime_looks_clean;
-            if let Some(dirstate_mtime) = entry.truncated_mtime() {
-                let fs_mtime = TruncatedTimestamp::for_mtime_of(fs_metadata)
+            let mtime_looks_clean = if let Some(dirstate_mtime) =
+                entry.truncated_mtime()
+            {
+                let fs_mtime = TruncatedTimestamp::for_mtime_of(&fs_metadata)
                     .expect("OS/libc does not support mtime?");
                // There might be a change in the future if, for example,
                // the internal clock becomes off while the process runs,
                // but this is a case where the issues the user would face
                // would be a lot worse and there is nothing we can really
                // do.
-                mtime_looks_clean = fs_mtime.likely_equal(dirstate_mtime)
+                fs_mtime.likely_equal(dirstate_mtime)
             } else {
                 // No mtime in the dirstate entry
-                mtime_looks_clean = false
+                false
             };
             if !mtime_looks_clean {
                 self.push_outcome(Outcome::Unsure, dirstate_node)?
@@ -751,7 +770,7 @@
                 if entry.removed() {
                     self.push_outcome(Outcome::Removed, dirstate_node)?
                 } else {
-                    self.push_outcome(Outcome::Deleted, &dirstate_node)?
+                    self.push_outcome(Outcome::Deleted, dirstate_node)?
                 }
             }
         }
@@ -767,10 +786,9 @@
         directory_hg_path: &HgPath,
         fs_entry: &DirEntry,
     ) -> bool {
-        let hg_path = directory_hg_path.join(&fs_entry.base_name);
-        let file_type = fs_entry.metadata.file_type();
-        let file_or_symlink = file_type.is_file() || file_type.is_symlink();
-        if file_type.is_dir() {
+        let hg_path = directory_hg_path.join(&fs_entry.hg_path);
+        let file_or_symlink = fs_entry.is_file() || fs_entry.is_symlink();
+        if fs_entry.is_dir() {
             let is_ignored =
                 has_ignored_ancestor || (self.ignore_fn)(&hg_path);
             let traverse_children = if is_ignored {
@@ -783,11 +801,9 @@
             };
             if traverse_children {
                 let is_at_repo_root = false;
-                if let Ok(children_fs_entries) = self.read_dir(
-                    &hg_path,
-                    &fs_entry.full_path,
-                    is_at_repo_root,
-                ) {
+                if let Ok(children_fs_entries) =
+                    self.read_dir(&hg_path, &fs_entry.fs_path, is_at_repo_root)
+                {
                     children_fs_entries.par_iter().for_each(|child_fs_entry| {
                         self.traverse_fs_only(
                             is_ignored,
@@ -801,26 +817,24 @@
                 }
             }
             is_ignored
+        } else if file_or_symlink {
+            if self.matcher.matches(&hg_path) {
+                self.mark_unknown_or_ignored(
+                    has_ignored_ancestor,
+                    &BorrowedPath::InMemory(&hg_path),
+                )
+            } else {
+                // We haven’t computed whether this path is ignored. It
+                // might not be, and a future run of status might have a
+                // different matcher that matches it. So treat it as not
+                // ignored. That is, inhibit readdir caching of the parent
+                // directory.
+                false
+            }
         } else {
-            if file_or_symlink {
-                if self.matcher.matches(&hg_path) {
-                    self.mark_unknown_or_ignored(
-                        has_ignored_ancestor,
-                        &BorrowedPath::InMemory(&hg_path),
-                    )
-                } else {
-                    // We haven’t computed whether this path is ignored. It
-                    // might not be, and a future run of status might have a
-                    // different matcher that matches it. So treat it as not
-                    // ignored. That is, inhibit readdir caching of the parent
-                    // directory.
-                    false
-                }
-            } else {
-                // This is neither a directory, a plain file, or a symlink.
-                // Treat it like an ignored file.
-                true
-            }
+            // This is neither a directory, a plain file, nor a symlink.
+            // Treat it like an ignored file.
+            true
         }
     }
 
@@ -830,7 +844,7 @@
         has_ignored_ancestor: bool,
         hg_path: &BorrowedPath<'_, 'on_disk>,
     ) -> bool {
-        let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
+        let is_ignored = has_ignored_ancestor || (self.ignore_fn)(hg_path);
         if is_ignored {
             if self.options.list_ignored {
                 self.push_outcome_without_copy_source(
@@ -838,27 +852,53 @@
                     hg_path,
                 )
             }
-        } else {
-            if self.options.list_unknown {
-                self.push_outcome_without_copy_source(
-                    Outcome::Unknown,
-                    hg_path,
-                )
-            }
+        } else if self.options.list_unknown {
+            self.push_outcome_without_copy_source(Outcome::Unknown, hg_path)
         }
         is_ignored
     }
 }
 
-struct DirEntry {
-    base_name: HgPathBuf,
-    full_path: PathBuf,
-    metadata: std::fs::Metadata,
+/// Since [`std::fs::FileType`] cannot be built directly, we emulate what we
+/// care about.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum FakeFileType {
+    File,
+    Directory,
+    Symlink,
 }
 
-impl DirEntry {
-    /// Returns **unsorted** entries in the given directory, with name and
-    /// metadata.
+impl TryFrom<std::fs::FileType> for FakeFileType {
+    type Error = ();
+
+    fn try_from(f: std::fs::FileType) -> Result<Self, Self::Error> {
+        if f.is_dir() {
+            Ok(Self::Directory)
+        } else if f.is_file() {
+            Ok(Self::File)
+        } else if f.is_symlink() {
+            Ok(Self::Symlink)
+        } else {
+            // Other file types: FIFOs, sockets, device files, etc.
+            Err(())
+        }
+    }
+}
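// Editor's sketch (hypothetical helper, not part of this changeset): how the
// conversion above is meant to be consumed. Unsupported types yield Err(())
// and are simply skipped by callers, as the directory-reading loop below
// does with `continue`.
fn classify(
    entry: &std::fs::DirEntry,
) -> std::io::Result<Option<FakeFileType>> {
    // `file_type()` is cheap: on most platforms it comes straight from the
    // readdir record and needs no extra `stat` call.
    Ok(FakeFileType::try_from(entry.file_type()?).ok())
}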
+
+struct DirEntry<'a> {
+    /// Path as stored in the dirstate, or just the filename for optimization.
+    hg_path: HgPathCow<'a>,
+    /// Filesystem path
+    fs_path: Cow<'a, Path>,
+    /// Lazily computed
+    symlink_metadata: Option<std::fs::Metadata>,
+    /// Already computed for ergonomics.
+    file_type: FakeFileType,
+}
+
+impl<'a> DirEntry<'a> {
+    /// Returns **unsorted** entries in the given directory, with name,
+    /// metadata and file type.
     ///
     /// If a `.hg` sub-directory is encountered:
     ///
@@ -872,7 +912,7 @@
         let mut results = Vec::new();
         for entry in read_dir_path.read_dir()? {
             let entry = entry?;
-            let metadata = match entry.metadata() {
+            let file_type = match entry.file_type() {
                 Ok(v) => v,
                 Err(e) => {
                     // race with file deletion?
@@ -889,7 +929,7 @@
                 if is_at_repo_root {
                     // Skip the repo’s own .hg (might be a symlink)
                     continue;
-                } else if metadata.is_dir() {
+                } else if file_type.is_dir() {
                     // A .hg sub-directory at another location means a subrepo,
                     // skip it entirely.
                     return Ok(Vec::new());
@@ -900,15 +940,40 @@
             } else {
                 entry.path()
             };
-            let base_name = get_bytes_from_os_string(file_name).into();
+            let filename =
+                Cow::Owned(get_bytes_from_os_string(file_name).into());
+            let file_type = match FakeFileType::try_from(file_type) {
+                Ok(file_type) => file_type,
+                Err(_) => continue,
+            };
             results.push(DirEntry {
-                base_name,
-                full_path,
-                metadata,
+                hg_path: filename,
+                fs_path: Cow::Owned(full_path.to_path_buf()),
+                symlink_metadata: None,
+                file_type,
             })
         }
         Ok(results)
     }
+
+    fn symlink_metadata(&self) -> Result<std::fs::Metadata, std::io::Error> {
+        match &self.symlink_metadata {
+            Some(meta) => Ok(meta.clone()),
+            None => std::fs::symlink_metadata(&self.fs_path),
+        }
+    }
+
+    fn is_dir(&self) -> bool {
+        self.file_type == FakeFileType::Directory
+    }
+
+    fn is_file(&self) -> bool {
+        self.file_type == FakeFileType::File
+    }
+
+    fn is_symlink(&self) -> bool {
+        self.file_type == FakeFileType::Symlink
+    }
 }
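// Editor's sketch (hypothetical helper, not part of this changeset) of the
// laziness above: is_dir()/is_file()/is_symlink() answer from the cheap
// FakeFileType captured at read_dir time, while symlink_metadata() only
// touches the filesystem when nothing was cached on the entry.
fn file_size_if_regular(entry: &DirEntry) -> std::io::Result<Option<u64>> {
    if entry.is_file() {
        // One stat call, and only for the entries that need it.
        Ok(Some(entry.symlink_metadata()?.len()))
    } else {
        Ok(None)
    }
}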
 
 /// Return the `mtime` of a temporary file newly-created in the `.hg` directory
--- a/rust/hg-core/src/discovery.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/discovery.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -194,7 +194,7 @@
         size: usize,
     ) -> Vec<Revision> {
         if !self.randomize {
-            sample.sort();
+            sample.sort_unstable();
             sample.truncate(size);
             return sample;
         }
@@ -513,14 +513,14 @@
     ) -> Vec<Revision> {
         let mut as_vec: Vec<Revision> =
             disco.undecided.as_ref().unwrap().iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         as_vec
     }
 
     fn sorted_missing(disco: &PartialDiscovery<SampleGraph>) -> Vec<Revision> {
         let mut as_vec: Vec<Revision> =
             disco.missing.iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         as_vec
     }
 
@@ -529,7 +529,7 @@
     ) -> Result<Vec<Revision>, GraphError> {
         let mut as_vec: Vec<Revision> =
             disco.common_heads()?.iter().cloned().collect();
-        as_vec.sort();
+        as_vec.sort_unstable();
         Ok(as_vec)
     }
 
@@ -621,7 +621,7 @@
         disco.undecided = Some((1..=13).collect());
 
         let mut sample_vec = disco.take_quick_sample(vec![], 4)?;
-        sample_vec.sort();
+        sample_vec.sort_unstable();
         assert_eq!(sample_vec, vec![10, 11, 12, 13]);
         Ok(())
     }
@@ -632,7 +632,7 @@
         disco.ensure_undecided()?;
 
         let mut sample_vec = disco.take_quick_sample(vec![12], 4)?;
-        sample_vec.sort();
+        sample_vec.sort_unstable();
         // r12's only parent is r9, whose unique grand-parent through the
         // diamond shape is r4. This ends there because the distance from r4
         // to the root is only 3.
@@ -650,11 +650,11 @@
         assert_eq!(cache.get(&10).cloned(), None);
 
         let mut children_4 = cache.get(&4).cloned().unwrap();
-        children_4.sort();
+        children_4.sort_unstable();
         assert_eq!(children_4, vec![5, 6, 7]);
 
         let mut children_7 = cache.get(&7).cloned().unwrap();
-        children_7.sort();
+        children_7.sort_unstable();
         assert_eq!(children_7, vec![9, 11]);
 
         Ok(())
@@ -684,7 +684,7 @@
         let (sample_set, size) = disco.bidirectional_sample(7)?;
         assert_eq!(size, 7);
         let mut sample: Vec<Revision> = sample_set.into_iter().collect();
-        sample.sort();
+        sample.sort_unstable();
         // our DAG is a bit too small for the results to be really interesting
         // at least it shows that
         // - we went both ways
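// Editor's note on the sort() -> sort_unstable() changes in this file: for a
// Vec<Revision> (plain i32 values) the two are observably identical, since
// equal integers cannot be told apart, but sort_unstable avoids the stable
// merge sort's temporary allocation; this is what clippy's
// `stable_sort_primitive` lint suggests. A stand-alone illustration:
fn sorted_revisions(mut revs: Vec<i32>) -> Vec<i32> {
    revs.sort_unstable();
    revs
}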
--- a/rust/hg-core/src/filepatterns.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/filepatterns.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -313,7 +313,7 @@
         PatternSyntax::RootGlob
         | PatternSyntax::Path
         | PatternSyntax::RelGlob
-        | PatternSyntax::RootFiles => normalize_path_bytes(&pattern),
+        | PatternSyntax::RootFiles => normalize_path_bytes(pattern),
         PatternSyntax::Include | PatternSyntax::SubInclude => {
             return Err(PatternError::NonRegexPattern(entry.clone()))
         }
@@ -368,7 +368,7 @@
     let mut warnings: Vec<PatternFileWarning> = vec![];
 
     let mut current_syntax =
-        default_syntax_override.unwrap_or(b"relre:".as_ref());
+        default_syntax_override.unwrap_or_else(|| b"relre:".as_ref());
 
     for (line_number, mut line) in lines.split(|c| *c == b'\n').enumerate() {
         let line_number = line_number + 1;
@@ -402,7 +402,7 @@
             continue;
         }
 
-        let mut line_syntax: &[u8] = &current_syntax;
+        let mut line_syntax: &[u8] = current_syntax;
 
         for (s, rels) in SYNTAXES.iter() {
             if let Some(rest) = line.drop_prefix(rels) {
@@ -418,7 +418,7 @@
         }
 
         inputs.push(IgnorePattern::new(
-            parse_pattern_syntax(&line_syntax).map_err(|e| match e {
+            parse_pattern_syntax(line_syntax).map_err(|e| match e {
                 PatternError::UnsupportedSyntax(syntax) => {
                     PatternError::UnsupportedSyntaxInFile(
                         syntax,
@@ -428,7 +428,7 @@
                 }
                 _ => e,
             })?,
-            &line,
+            line,
             file_path,
         ));
     }
@@ -502,7 +502,7 @@
                 }
                 PatternSyntax::SubInclude => {
                     let mut sub_include = SubInclude::new(
-                        &root_dir,
+                        root_dir,
                         &entry.pattern,
                         &entry.source,
                     )?;
@@ -564,11 +564,11 @@
         let prefix = canonical_path(root_dir, root_dir, new_root)?;
 
         Ok(Self {
-            prefix: path_to_hg_path_buf(prefix).and_then(|mut p| {
+            prefix: path_to_hg_path_buf(prefix).map(|mut p| {
                 if !p.is_empty() {
                     p.push_byte(b'/');
                 }
-                Ok(p)
+                p
             })?,
             path: path.to_owned(),
             root: new_root.to_owned(),
@@ -581,14 +581,14 @@
 /// phase.
 pub fn filter_subincludes(
     ignore_patterns: Vec<IgnorePattern>,
-) -> Result<(Vec<Box<SubInclude>>, Vec<IgnorePattern>), HgPathError> {
+) -> Result<(Vec<SubInclude>, Vec<IgnorePattern>), HgPathError> {
     let mut subincludes = vec![];
     let mut others = vec![];
 
     for pattern in ignore_patterns {
         if let PatternSyntax::ExpandedSubInclude(sub_include) = pattern.syntax
         {
-            subincludes.push(sub_include);
+            subincludes.push(*sub_include);
         } else {
             others.push(pattern)
         }
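// Editor's sketch: `filter_subincludes` now returns plain SubInclude values;
// `*sub_include` above moves the SubInclude out of the Box stored in
// PatternSyntax::ExpandedSubInclude. A stand-alone analogue:
fn unbox<T>(boxed: Box<T>) -> T {
    // Dereferencing a Box in a move context transfers ownership of the heap
    // value to the caller; the allocation itself is freed.
    *boxed
}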
--- a/rust/hg-core/src/lib.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/lib.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -30,6 +30,7 @@
 pub mod repo;
 pub mod revlog;
 pub use revlog::*;
+pub mod checkexec;
 pub mod config;
 pub mod lock;
 pub mod logging;
@@ -47,10 +48,6 @@
 use std::fmt;
 use twox_hash::RandomXxHashBuilder64;
 
-/// This is a contract between the `micro-timer` crate and us, to expose
-/// the `log` crate as `crate::log`.
-use log;
-
 pub type LineNumber = usize;
 
 /// Rust's default hasher is too slow because it tries to prevent collision
--- a/rust/hg-core/src/lock.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/lock.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -2,7 +2,6 @@
 
 use crate::errors::HgError;
 use crate::errors::HgResultExt;
-use crate::utils::StrExt;
 use crate::vfs::Vfs;
 use std::io;
 use std::io::ErrorKind;
@@ -107,8 +106,8 @@
 /// running anymore.
 fn lock_should_be_broken(data: &Option<String>) -> bool {
     (|| -> Option<bool> {
-        let (prefix, pid) = data.as_ref()?.split_2(':')?;
-        if prefix != &*LOCK_PREFIX {
+        let (prefix, pid) = data.as_ref()?.split_once(':')?;
+        if prefix != *LOCK_PREFIX {
             return Some(false);
         }
         let process_is_running;
@@ -145,6 +144,8 @@
 
         /// Same as https://github.com/python/cpython/blob/v3.10.0/Modules/socketmodule.c#L5414
         const BUFFER_SIZE: usize = 1024;
+        // This cast is *needed* for platforms with signed chars
+        #[allow(clippy::unnecessary_cast)]
         let mut buffer = [0 as libc::c_char; BUFFER_SIZE];
         let hostname_bytes = unsafe {
             let result = libc::gethostname(buffer.as_mut_ptr(), BUFFER_SIZE);
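// Editor's note: the hand-rolled StrExt::split_2 is replaced above by std's
// str::split_once (stable since Rust 1.52). A hypothetical stand-alone use,
// mirroring the "prefix:pid" lock data format:
fn split_lock_data(data: &str) -> Option<(&str, &str)> {
    // split_lock_data("myhost:1234") == Some(("myhost", "1234"))
    // split_lock_data("no-delimiter") == None
    data.split_once(':')
}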
--- a/rust/hg-core/src/matchers.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/matchers.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -27,12 +27,9 @@
 use std::borrow::ToOwned;
 use std::collections::HashSet;
 use std::fmt::{Display, Error, Formatter};
-use std::iter::FromIterator;
 use std::ops::Deref;
 use std::path::{Path, PathBuf};
 
-use micro_timer::timed;
-
 #[derive(Debug, PartialEq)]
 pub enum VisitChildrenSet {
     /// Don't visit anything
@@ -305,11 +302,11 @@
     }
 
     fn matches(&self, filename: &HgPath) -> bool {
-        (self.match_fn)(filename.as_ref())
+        (self.match_fn)(filename)
     }
 
     fn visit_children_set(&self, directory: &HgPath) -> VisitChildrenSet {
-        let dir = directory.as_ref();
+        let dir = directory;
         if self.prefix && self.roots.contains(dir) {
             return VisitChildrenSet::Recursive;
         }
@@ -321,11 +318,11 @@
             return VisitChildrenSet::This;
         }
 
-        if self.parents.contains(directory.as_ref()) {
+        if self.parents.contains(dir.as_ref()) {
             let multiset = self.get_all_parents_children();
             if let Some(children) = multiset.get(dir) {
                 return VisitChildrenSet::Set(
-                    children.into_iter().map(HgPathBuf::from).collect(),
+                    children.iter().map(HgPathBuf::from).collect(),
                 );
             }
         }
@@ -449,7 +446,7 @@
                 VisitChildrenSet::This
             }
             (VisitChildrenSet::Set(m1), VisitChildrenSet::Set(m2)) => {
-                let set: HashSet<_> = m1.intersection(&m2).cloned().collect();
+                let set: HashSet<_> = m1.intersection(m2).cloned().collect();
                 if set.is_empty() {
                     VisitChildrenSet::Empty
                 } else {
@@ -612,7 +609,7 @@
 /// This can fail when the pattern is invalid or not supported by the
 /// underlying engine (the `regex` crate), for instance anything with
 /// back-references.
-#[timed]
+#[logging_timer::time("trace")]
 fn re_matcher(pattern: &[u8]) -> PatternResult<RegexMatcher> {
     use std::io::Write;
 
@@ -702,10 +699,9 @@
             PatternSyntax::RootGlob | PatternSyntax::Glob => {
                 let mut root = HgPathBuf::new();
                 for p in pattern.split(|c| *c == b'/') {
-                    if p.iter().any(|c| match *c {
-                        b'[' | b'{' | b'*' | b'?' => true,
-                        _ => false,
-                    }) {
+                    if p.iter()
+                        .any(|c| matches!(*c, b'[' | b'{' | b'*' | b'?'))
+                    {
                         break;
                     }
                     root.push(HgPathBuf::from_bytes(p).as_ref());
@@ -783,10 +779,10 @@
 
 /// Returns a function that checks whether a given file (in the general sense)
 /// should be matched.
-fn build_match<'a, 'b>(
+fn build_match<'a>(
     ignore_patterns: Vec<IgnorePattern>,
-) -> PatternResult<(Vec<u8>, IgnoreFnType<'b>)> {
-    let mut match_funcs: Vec<IgnoreFnType<'b>> = vec![];
+) -> PatternResult<(Vec<u8>, IgnoreFnType<'a>)> {
+    let mut match_funcs: Vec<IgnoreFnType<'a>> = vec![];
     // For debugging and printing
     let mut patterns = vec![];
 
@@ -924,9 +920,8 @@
             dirs,
             parents,
         } = roots_dirs_and_parents(&ignore_patterns)?;
-        let prefix = ignore_patterns.iter().all(|k| match k.syntax {
-            PatternSyntax::Path | PatternSyntax::RelPath => true,
-            _ => false,
+        let prefix = ignore_patterns.iter().all(|k| {
+            matches!(k.syntax, PatternSyntax::Path | PatternSyntax::RelPath)
         });
         let (patterns, match_fn) = build_match(ignore_patterns)?;
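// Editor's note on the `matches!` rewrites above: the macro is the idiomatic
// form of a match whose arms only produce true or false. A hypothetical
// stand-alone equivalent of the glob-special-character check:
fn is_glob_special(c: u8) -> bool {
    matches!(c, b'[' | b'{' | b'*' | b'?')
}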
 
--- a/rust/hg-core/src/narrow.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/narrow.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -37,12 +37,14 @@
     }
     // Treat "narrowspec does not exist" the same as "narrowspec file exists
     // and is empty".
-    let store_spec = repo.store_vfs().try_read(FILENAME)?.unwrap_or(vec![]);
-    let working_copy_spec =
-        repo.hg_vfs().try_read(DIRSTATE_FILENAME)?.unwrap_or(vec![]);
+    let store_spec = repo.store_vfs().try_read(FILENAME)?.unwrap_or_default();
+    let working_copy_spec = repo
+        .hg_vfs()
+        .try_read(DIRSTATE_FILENAME)?
+        .unwrap_or_default();
     if store_spec != working_copy_spec {
         return Err(HgError::abort(
-            "working copy's narrowspec is stale",
+            "abort: working copy's narrowspec is stale",
             exit_codes::STATE_ERROR,
             Some("run 'hg tracked --update-working-copy'".into()),
         )
--- a/rust/hg-core/src/operations/cat.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/operations/cat.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -6,8 +6,8 @@
 // GNU General Public License version 2 or any later version.
 
 use crate::repo::Repo;
-use crate::revlog::revlog::RevlogError;
 use crate::revlog::Node;
+use crate::revlog::RevlogError;
 
 use crate::utils::hg_path::HgPath;
 
@@ -53,10 +53,13 @@
     }
 }
 
+// Tuple of (found, missing) paths in the manifest
+type ManifestQueryResponse<'a> = (Vec<(&'a HgPath, Node)>, Vec<&'a HgPath>);
+
 fn find_files_in_manifest<'query>(
     manifest: &Manifest,
     query: impl Iterator<Item = &'query HgPath>,
-) -> Result<(Vec<(&'query HgPath, Node)>, Vec<&'query HgPath>), HgError> {
+) -> Result<ManifestQueryResponse<'query>, HgError> {
     let mut manifest = put_back(manifest.iter());
     let mut res = vec![];
     let mut missing = vec![];
@@ -67,7 +70,7 @@
             Some(item) => res.push((file, item)),
         }
     }
-    return Ok((res, missing));
+    Ok((res, missing))
 }
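// Editor's sketch (hypothetical helper, not part of this changeset): the two
// halves of the ManifestQueryResponse returned above, destructured explicitly.
fn count_results(response: ManifestQueryResponse<'_>) -> (usize, usize) {
    // `found` pairs each path with its file node; `missing` is bare paths.
    let (found, missing) = response;
    (found.len(), missing.len())
}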
 
 /// Output the given revision of files
@@ -91,10 +94,8 @@
 
     files.sort_unstable();
 
-    let (found, missing) = find_files_in_manifest(
-        &manifest,
-        files.into_iter().map(|f| f.as_ref()),
-    )?;
+    let (found, missing) =
+        find_files_in_manifest(&manifest, files.into_iter())?;
 
     for (file_path, file_node) in found {
         found_any = true;
--- a/rust/hg-core/src/operations/debugdata.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/operations/debugdata.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -7,7 +7,7 @@
 
 use crate::repo::Repo;
 use crate::requirements;
-use crate::revlog::revlog::{Revlog, RevlogError};
+use crate::revlog::{Revlog, RevlogError};
 
 /// Kind of data to debug
 #[derive(Debug, Copy, Clone)]
--- a/rust/hg-core/src/operations/list_tracked_files.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/operations/list_tracked_files.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -5,78 +5,41 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::dirstate::parsers::parse_dirstate_entries;
-use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
 use crate::errors::HgError;
+use crate::matchers::Matcher;
 use crate::repo::Repo;
 use crate::revlog::manifest::Manifest;
-use crate::revlog::revlog::RevlogError;
+use crate::revlog::RevlogError;
+use crate::utils::filter_map_results;
 use crate::utils::hg_path::HgPath;
-use crate::DirstateError;
-use rayon::prelude::*;
-
-/// List files under Mercurial control in the working directory
-/// by reading the dirstate
-pub struct Dirstate {
-    /// The `dirstate` content.
-    content: Vec<u8>,
-    v2_metadata: Option<Vec<u8>>,
-}
-
-impl Dirstate {
-    pub fn new(repo: &Repo) -> Result<Self, HgError> {
-        let mut content = repo.hg_vfs().read("dirstate")?;
-        let v2_metadata = if repo.has_dirstate_v2() {
-            let docket = read_docket(&content)?;
-            let meta = docket.tree_metadata().to_vec();
-            content = repo.hg_vfs().read(docket.data_filename())?;
-            Some(meta)
-        } else {
-            None
-        };
-        Ok(Self {
-            content,
-            v2_metadata,
-        })
-    }
-
-    pub fn tracked_files(&self) -> Result<Vec<&HgPath>, DirstateError> {
-        let mut files = Vec::new();
-        if !self.content.is_empty() {
-            if let Some(meta) = &self.v2_metadata {
-                for_each_tracked_path(&self.content, meta, |path| {
-                    files.push(path)
-                })?
-            } else {
-                let _parents = parse_dirstate_entries(
-                    &self.content,
-                    |path, entry, _copy_source| {
-                        if entry.tracked() {
-                            files.push(path)
-                        }
-                        Ok(())
-                    },
-                )?;
-            }
-        }
-        files.par_sort_unstable();
-        Ok(files)
-    }
-}
 
 /// List files under Mercurial control at a given revision.
 pub fn list_rev_tracked_files(
     repo: &Repo,
     revset: &str,
+    narrow_matcher: Box<dyn Matcher>,
 ) -> Result<FilesForRev, RevlogError> {
     let rev = crate::revset::resolve_single(revset, repo)?;
-    Ok(FilesForRev(repo.manifest_for_rev(rev)?))
+    Ok(FilesForRev {
+        manifest: repo.manifest_for_rev(rev)?,
+        narrow_matcher,
+    })
 }
 
-pub struct FilesForRev(Manifest);
+pub struct FilesForRev {
+    manifest: Manifest,
+    narrow_matcher: Box<dyn Matcher>,
+}
 
 impl FilesForRev {
     pub fn iter(&self) -> impl Iterator<Item = Result<&HgPath, HgError>> {
-        self.0.iter().map(|entry| Ok(entry?.path))
+        filter_map_results(self.manifest.iter(), |entry| {
+            let path = entry.path;
+            Ok(if self.narrow_matcher.matches(path) {
+                Some(path)
+            } else {
+                None
+            })
+        })
     }
 }
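// Editor's note: crate::utils::filter_map_results (imported above) is assumed
// to behave like this free-standing equivalent: feed every Ok item through a
// fallible filter, drop the Ok(None) results, and pass errors through.
fn filter_map_results_sketch<T, U, E>(
    iter: impl Iterator<Item = Result<T, E>>,
    f: impl Fn(T) -> Result<Option<U>, E>,
) -> impl Iterator<Item = Result<U, E>> {
    iter.filter_map(move |res| match res {
        Ok(item) => f(item).transpose(),
        Err(e) => Some(Err(e)),
    })
}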
--- a/rust/hg-core/src/operations/mod.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/operations/mod.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -7,5 +7,4 @@
 mod list_tracked_files;
 pub use cat::{cat, CatOutput};
 pub use debugdata::{debug_data, DebugDataKind};
-pub use list_tracked_files::Dirstate;
 pub use list_tracked_files::{list_rev_tracked_files, FilesForRev};
--- a/rust/hg-core/src/repo.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/repo.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -9,7 +9,7 @@
 use crate::lock::{try_with_lock_no_wait, LockError};
 use crate::manifest::{Manifest, Manifestlog};
 use crate::revlog::filelog::Filelog;
-use crate::revlog::revlog::RevlogError;
+use crate::revlog::RevlogError;
 use crate::utils::debug::debug_wait_for_file_or_print;
 use crate::utils::files::get_path_from_bytes;
 use crate::utils::hg_path::HgPath;
@@ -26,6 +26,8 @@
 
 const V2_MAX_READ_ATTEMPTS: usize = 5;
 
+type DirstateMapIdentity = (Option<u64>, Option<Vec<u8>>, usize);
+
 /// A repository on disk
 pub struct Repo {
     working_directory: PathBuf,
@@ -71,9 +73,9 @@
                 return Ok(ancestor.to_path_buf());
             }
         }
-        return Err(RepoError::NotFound {
+        Err(RepoError::NotFound {
             at: current_directory,
-        });
+        })
     }
 
     /// Find a repository, either at the given path (which must contain a `.hg`
@@ -90,13 +92,11 @@
     ) -> Result<Self, RepoError> {
         if let Some(root) = explicit_path {
             if is_dir(root.join(".hg"))? {
-                Self::new_at_path(root.to_owned(), config)
+                Self::new_at_path(root, config)
             } else if is_file(&root)? {
                 Err(HgError::unsupported("bundle repository").into())
             } else {
-                Err(RepoError::NotFound {
-                    at: root.to_owned(),
-                })
+                Err(RepoError::NotFound { at: root })
             }
         } else {
             let root = Self::find_repo_root()?;
@@ -111,9 +111,8 @@
     ) -> Result<Self, RepoError> {
         let dot_hg = working_directory.join(".hg");
 
-        let mut repo_config_files = Vec::new();
-        repo_config_files.push(dot_hg.join("hgrc"));
-        repo_config_files.push(dot_hg.join("hgrc-not-shared"));
+        let mut repo_config_files =
+            vec![dot_hg.join("hgrc"), dot_hg.join("hgrc-not-shared")];
 
         let hg_vfs = Vfs { base: &dot_hg };
         let mut reqs = requirements::load_if_exists(hg_vfs)?;
@@ -256,7 +255,7 @@
             .hg_vfs()
             .read("dirstate")
             .io_not_found_as_none()?
-            .unwrap_or(Vec::new()))
+            .unwrap_or_default())
     }
 
     fn dirstate_identity(&self) -> Result<Option<u64>, HgError> {
@@ -283,8 +282,7 @@
                 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
             docket.parents()
         } else {
-            crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
-                .clone()
+            *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
         };
         self.dirstate_parents.set(parents);
         Ok(parents)
@@ -296,7 +294,7 @@
     /// Namely, the inode, data file uuid and the data size.
     fn get_dirstate_data_file_integrity(
         &self,
-    ) -> Result<(Option<u64>, Option<Vec<u8>>, usize), HgError> {
+    ) -> Result<DirstateMapIdentity, HgError> {
         assert!(
             self.has_dirstate_v2(),
             "accessing dirstate data file ID without dirstate-v2"
@@ -345,7 +343,7 @@
                             );
                             continue;
                         }
-                        _ => return Err(e.into()),
+                        _ => return Err(e),
                     },
                 }
             }
@@ -354,7 +352,7 @@
                 255,
                 None,
             );
-            return Err(DirstateError::Common(error));
+            Err(DirstateError::Common(error))
         } else {
             debug_wait_for_file_or_print(
                 self.config(),
@@ -362,7 +360,7 @@
             );
             let identity = self.dirstate_identity()?;
             let dirstate_file_contents = self.dirstate_file_contents()?;
-            return if dirstate_file_contents.is_empty() {
+            if dirstate_file_contents.is_empty() {
                 self.dirstate_parents.set(DirstateParents::NULL);
                 Ok(OwningDirstateMap::new_empty(Vec::new()))
             } else {
@@ -372,7 +370,7 @@
                 )?;
                 self.dirstate_parents.set(parents);
                 Ok(map)
-            };
+            }
         }
     }
 
--- a/rust/hg-core/src/revlog.rs	Thu Mar 02 15:21:36 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-// Copyright 2018-2020 Georges Racinet <georges.racinet@octobus.net>
-//           and Mercurial contributors
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-//! Mercurial concepts for handling revision history
-
-pub mod node;
-pub mod nodemap;
-mod nodemap_docket;
-pub mod path_encode;
-pub use node::{FromHexError, Node, NodePrefix};
-pub mod changelog;
-pub mod filelog;
-pub mod index;
-pub mod manifest;
-pub mod patch;
-pub mod revlog;
-
-/// Mercurial revision numbers
-///
-/// As noted in revlog.c, revision numbers are actually encoded in
-/// 4 bytes, and are liberally converted to ints, whence the i32
-pub type Revision = i32;
-
-/// Marker expressing the absence of a parent
-///
-/// Independently of the actual representation, `NULL_REVISION` is guaranteed
-/// to be smaller than all existing revisions.
-pub const NULL_REVISION: Revision = -1;
-
-/// Same as `mercurial.node.wdirrev`
-///
-/// This is also equal to `i32::max_value()`, but it's better to spell
-/// it out explicitely, same as in `mercurial.node`
-#[allow(clippy::unreadable_literal)]
-pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
-
-pub const WORKING_DIRECTORY_HEX: &str =
-    "ffffffffffffffffffffffffffffffffffffffff";
-
-/// The simplest expression of what we need of Mercurial DAGs.
-pub trait Graph {
-    /// Return the two parents of the given `Revision`.
-    ///
-    /// Each of the parents can be independently `NULL_REVISION`
-    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError>;
-}
-
-#[derive(Clone, Debug, PartialEq)]
-pub enum GraphError {
-    ParentOutOfRange(Revision),
-    WorkingDirectoryUnsupported,
-}
-
-/// The Mercurial Revlog Index
-///
-/// This is currently limited to the minimal interface that is needed for
-/// the [`nodemap`](nodemap/index.html) module
-pub trait RevlogIndex {
-    /// Total number of Revisions referenced in this index
-    fn len(&self) -> usize;
-
-    fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Return a reference to the Node or `None` if rev is out of bounds
-    ///
-    /// `NULL_REVISION` is not considered to be out of bounds.
-    fn node(&self, rev: Revision) -> Option<&Node>;
-}
--- a/rust/hg-core/src/revlog/changelog.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/changelog.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,7 +1,7 @@
 use crate::errors::HgError;
-use crate::revlog::revlog::{Revlog, RevlogEntry, RevlogError};
 use crate::revlog::Revision;
 use crate::revlog::{Node, NodePrefix};
+use crate::revlog::{Revlog, RevlogEntry, RevlogError};
 use crate::utils::hg_path::HgPath;
 use crate::vfs::Vfs;
 use itertools::Itertools;
@@ -165,7 +165,7 @@
     pub fn files(&self) -> impl Iterator<Item = &HgPath> {
         self.bytes[self.timestamp_end + 1..self.files_end]
             .split(|b| b == &b'\n')
-            .map(|path| HgPath::new(path))
+            .map(HgPath::new)
     }
 
     /// The change description.
--- a/rust/hg-core/src/revlog/filelog.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/filelog.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,10 +1,10 @@
 use crate::errors::HgError;
 use crate::repo::Repo;
 use crate::revlog::path_encode::path_encode;
-use crate::revlog::revlog::RevlogEntry;
-use crate::revlog::revlog::{Revlog, RevlogError};
 use crate::revlog::NodePrefix;
 use crate::revlog::Revision;
+use crate::revlog::RevlogEntry;
+use crate::revlog::{Revlog, RevlogError};
 use crate::utils::files::get_path_from_bytes;
 use crate::utils::hg_path::HgPath;
 use crate::utils::SliceExt;
@@ -49,7 +49,7 @@
         file_rev: Revision,
     ) -> Result<FilelogRevisionData, RevlogError> {
         let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned();
-        Ok(FilelogRevisionData(data.into()))
+        Ok(FilelogRevisionData(data))
     }
 
     /// The given node ID is that of the file as found in a filelog, not of a
@@ -161,7 +161,7 @@
         // this `FilelogEntry` does not have such metadata:
         let file_data_len = uncompressed_len;
 
-        return file_data_len != other_len;
+        file_data_len != other_len
     }
 
     pub fn data(&self) -> Result<FilelogRevisionData, HgError> {
--- a/rust/hg-core/src/revlog/index.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/index.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,4 +1,3 @@
-use std::convert::TryInto;
 use std::ops::Deref;
 
 use byteorder::{BigEndian, ByteOrder};
@@ -22,11 +21,11 @@
 impl IndexHeaderFlags {
     /// Corresponds to FLAG_INLINE_DATA in python
     pub fn is_inline(self) -> bool {
-        return self.flags & 1 != 0;
+        self.flags & 1 != 0
     }
     /// Corresponds to FLAG_GENERALDELTA in python
     pub fn uses_generaldelta(self) -> bool {
-        return self.flags & 2 != 0;
+        self.flags & 2 != 0
     }
 }
 
@@ -36,9 +35,9 @@
     fn format_flags(&self) -> IndexHeaderFlags {
         // No "unknown flags" check here, unlike in python. Maybe there should
         // be.
-        return IndexHeaderFlags {
+        IndexHeaderFlags {
             flags: BigEndian::read_u16(&self.header_bytes[0..2]),
-        };
+        }
     }
 
     /// The only revlog version currently supported by rhg.
@@ -46,7 +45,7 @@
 
     /// Corresponds to `_format_version` in Python.
     fn format_version(&self) -> u16 {
-        return BigEndian::read_u16(&self.header_bytes[2..4]);
+        BigEndian::read_u16(&self.header_bytes[2..4])
     }
 
     const EMPTY_INDEX_HEADER: IndexHeader = IndexHeader {
@@ -60,7 +59,7 @@
     };
 
     fn parse(index_bytes: &[u8]) -> Result<IndexHeader, HgError> {
-        if index_bytes.len() == 0 {
+        if index_bytes.is_empty() {
             return Ok(IndexHeader::EMPTY_INDEX_HEADER);
         }
         if index_bytes.len() < 4 {
@@ -68,13 +67,13 @@
                 "corrupted revlog: can't read the index format header",
             ));
         }
-        return Ok(IndexHeader {
+        Ok(IndexHeader {
             header_bytes: {
                 let bytes: [u8; 4] =
                     index_bytes[0..4].try_into().expect("impossible");
                 bytes
             },
-        });
+        })
     }
 }
 
@@ -128,8 +127,7 @@
                     uses_generaldelta,
                 })
             } else {
-                Err(HgError::corrupted("unexpected inline revlog length")
-                    .into())
+                Err(HgError::corrupted("unexpected inline revlog length"))
             }
         } else {
             Ok(Self {
@@ -327,6 +325,7 @@
 
     #[cfg(test)]
     impl IndexEntryBuilder {
+        #[allow(clippy::new_without_default)]
         pub fn new() -> Self {
             Self {
                 is_first: false,
@@ -466,8 +465,8 @@
             .with_inline(false)
             .build();
 
-        assert_eq!(is_inline(&bytes), false);
-        assert_eq!(uses_generaldelta(&bytes), false);
+        assert!(!is_inline(&bytes));
+        assert!(!uses_generaldelta(&bytes));
     }
 
     #[test]
@@ -478,8 +477,8 @@
             .with_inline(true)
             .build();
 
-        assert_eq!(is_inline(&bytes), true);
-        assert_eq!(uses_generaldelta(&bytes), false);
+        assert!(is_inline(&bytes));
+        assert!(!uses_generaldelta(&bytes));
     }
 
     #[test]
@@ -490,8 +489,8 @@
             .with_inline(true)
             .build();
 
-        assert_eq!(is_inline(&bytes), true);
-        assert_eq!(uses_generaldelta(&bytes), true);
+        assert!(is_inline(&bytes));
+        assert!(uses_generaldelta(&bytes));
     }
 
     #[test]
--- a/rust/hg-core/src/revlog/manifest.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/manifest.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,7 +1,7 @@
 use crate::errors::HgError;
-use crate::revlog::revlog::{Revlog, RevlogError};
 use crate::revlog::Revision;
 use crate::revlog::{Node, NodePrefix};
+use crate::revlog::{Revlog, RevlogError};
 use crate::utils::hg_path::HgPath;
 use crate::utils::SliceExt;
 use crate::vfs::Vfs;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/revlog/mod.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,710 @@
+// Copyright 2018-2023 Georges Racinet <georges.racinet@octobus.net>
+//           and Mercurial contributors
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+//! Mercurial concepts for handling revision history
+
+pub mod node;
+pub mod nodemap;
+mod nodemap_docket;
+pub mod path_encode;
+pub use node::{FromHexError, Node, NodePrefix};
+pub mod changelog;
+pub mod filelog;
+pub mod index;
+pub mod manifest;
+pub mod patch;
+
+use std::borrow::Cow;
+use std::io::Read;
+use std::ops::Deref;
+use std::path::Path;
+
+use flate2::read::ZlibDecoder;
+use sha1::{Digest, Sha1};
+use zstd;
+
+use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
+use self::nodemap_docket::NodeMapDocket;
+use super::index::Index;
+use super::nodemap::{NodeMap, NodeMapError};
+use crate::errors::HgError;
+use crate::vfs::Vfs;
+
+/// Mercurial revision numbers
+///
+/// As noted in revlog.c, revision numbers are actually encoded in
+/// 4 bytes, and are liberally converted to ints, whence the i32
+pub type Revision = i32;
+
+/// Marker expressing the absence of a parent
+///
+/// Independently of the actual representation, `NULL_REVISION` is guaranteed
+/// to be smaller than all existing revisions.
+pub const NULL_REVISION: Revision = -1;
+
+/// Same as `mercurial.node.wdirrev`
+///
+/// This is also equal to `i32::max_value()`, but it's better to spell
+/// it out explicitly, same as in `mercurial.node`
+#[allow(clippy::unreadable_literal)]
+pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
+
+pub const WORKING_DIRECTORY_HEX: &str =
+    "ffffffffffffffffffffffffffffffffffffffff";
+
+/// The simplest expression of what we need of Mercurial DAGs.
+pub trait Graph {
+    /// Return the two parents of the given `Revision`.
+    ///
+    /// Each of the parents can be independently `NULL_REVISION`
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError>;
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum GraphError {
+    ParentOutOfRange(Revision),
+    WorkingDirectoryUnsupported,
+}
+
+/// The Mercurial Revlog Index
+///
+/// This is currently limited to the minimal interface that is needed for
+/// the [`nodemap`](nodemap/index.html) module
+pub trait RevlogIndex {
+    /// Total number of Revisions referenced in this index
+    fn len(&self) -> usize;
+
+    fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Return a reference to the Node or `None` if rev is out of bounds
+    ///
+    /// `NULL_REVISION` is not considered to be out of bounds.
+    fn node(&self, rev: Revision) -> Option<&Node>;
+}
+
+const REVISION_FLAG_CENSORED: u16 = 1 << 15;
+const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
+const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
+const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;
+
+// Keep this in sync with REVIDX_KNOWN_FLAGS in
+// mercurial/revlogutils/flagutil.py
+const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
+    | REVISION_FLAG_ELLIPSIS
+    | REVISION_FLAG_EXTSTORED
+    | REVISION_FLAG_HASCOPIESINFO;
+
+const NULL_REVLOG_ENTRY_FLAGS: u16 = 0;
+
+#[derive(Debug, derive_more::From)]
+pub enum RevlogError {
+    InvalidRevision,
+    /// Working directory is not supported
+    WDirUnsupported,
+    /// Found more than one entry whose ID match the requested prefix
+    AmbiguousPrefix,
+    #[from]
+    Other(HgError),
+}
+
+impl From<NodeMapError> for RevlogError {
+    fn from(error: NodeMapError) -> Self {
+        match error {
+            NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
+            NodeMapError::RevisionNotInIndex(rev) => RevlogError::corrupted(
+                format!("nodemap point to revision {} not in index", rev),
+            ),
+        }
+    }
+}
+
+fn corrupted<S: AsRef<str>>(context: S) -> HgError {
+    HgError::corrupted(format!("corrupted revlog, {}", context.as_ref()))
+}
+
+impl RevlogError {
+    fn corrupted<S: AsRef<str>>(context: S) -> Self {
+        RevlogError::Other(corrupted(context))
+    }
+}
+
+/// Read only implementation of revlog.
+pub struct Revlog {
+    /// When index and data are not interleaved: bytes of the revlog index.
+    /// When index and data are interleaved: bytes of the revlog index and
+    /// data.
+    index: Index,
+    /// When index and data are not interleaved: bytes of the revlog data
+    data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
+    /// When present on disk: the persistent nodemap for this revlog
+    nodemap: Option<nodemap::NodeTree>,
+}
+
+impl Revlog {
+    /// Open a revlog index file.
+    ///
+    /// It will also open the associated data file if index and data are not
+    /// interleaved.
+    pub fn open(
+        store_vfs: &Vfs,
+        index_path: impl AsRef<Path>,
+        data_path: Option<&Path>,
+        use_nodemap: bool,
+    ) -> Result<Self, HgError> {
+        let index_path = index_path.as_ref();
+        let index = {
+            match store_vfs.mmap_open_opt(&index_path)? {
+                None => Index::new(Box::new(vec![])),
+                Some(index_mmap) => {
+                    let index = Index::new(Box::new(index_mmap))?;
+                    Ok(index)
+                }
+            }
+        }?;
+
+        let default_data_path = index_path.with_extension("d");
+
+        // The type annotation is required: the compiler won't otherwise
+        // recognize Mmap as Deref<Target = [u8]>.
+        let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
+            if index.is_inline() {
+                None
+            } else {
+                let data_path = data_path.unwrap_or(&default_data_path);
+                let data_mmap = store_vfs.mmap_open(data_path)?;
+                Some(Box::new(data_mmap))
+            };
+
+        let nodemap = if index.is_inline() || !use_nodemap {
+            None
+        } else {
+            NodeMapDocket::read_from_file(store_vfs, index_path)?.map(
+                |(docket, data)| {
+                    nodemap::NodeTree::load_bytes(
+                        Box::new(data),
+                        docket.data_length,
+                    )
+                },
+            )
+        };
+
+        Ok(Revlog {
+            index,
+            data_bytes,
+            nodemap,
+        })
+    }
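// Editor's sketch (hypothetical usage, not part of this changeset): opening
// the changelog index through a store Vfs; passing None lets the ".d" data
// file be derived from the index path, and `true` enables the persistent
// nodemap when one exists on disk.
fn open_changelog(store_vfs: &Vfs) -> Result<Revlog, HgError> {
    Revlog::open(store_vfs, "00changelog.i", None, true)
}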
+
+    /// Return number of entries of the `Revlog`.
+    pub fn len(&self) -> usize {
+        self.index.len()
+    }
+
+    /// Returns `true` if the `Revlog` has zero `entries`.
+    pub fn is_empty(&self) -> bool {
+        self.index.is_empty()
+    }
+
+    /// Returns the node ID for the given revision number, if it exists in this
+    /// revlog
+    pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
+        if rev == NULL_REVISION {
+            return Some(&NULL_NODE);
+        }
+        Some(self.index.get_entry(rev)?.hash())
+    }
+
+    /// Return the revision number for the given node ID, if it exists in this
+    /// revlog
+    pub fn rev_from_node(
+        &self,
+        node: NodePrefix,
+    ) -> Result<Revision, RevlogError> {
+        if node.is_prefix_of(&NULL_NODE) {
+            return Ok(NULL_REVISION);
+        }
+
+        if let Some(nodemap) = &self.nodemap {
+            return nodemap
+                .find_bin(&self.index, node)?
+                .ok_or(RevlogError::InvalidRevision);
+        }
+
+        // Fallback to linear scan when a persistent nodemap is not present.
+        // This happens when the persistent-nodemap experimental feature is not
+        // enabled, or for small revlogs.
+        //
+        // TODO: consider building a non-persistent nodemap in memory to
+        // optimize these cases.
+        let mut found_by_prefix = None;
+        for rev in (0..self.len() as Revision).rev() {
+            let index_entry = self.index.get_entry(rev).ok_or_else(|| {
+                HgError::corrupted(
+                    "revlog references a revision not in the index",
+                )
+            })?;
+            if node == *index_entry.hash() {
+                return Ok(rev);
+            }
+            if node.is_prefix_of(index_entry.hash()) {
+                if found_by_prefix.is_some() {
+                    return Err(RevlogError::AmbiguousPrefix);
+                }
+                found_by_prefix = Some(rev)
+            }
+        }
+        found_by_prefix.ok_or(RevlogError::InvalidRevision)
+    }
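// Editor's sketch (hypothetical, assuming NodePrefix::from_hex from the node
// module re-exported above): resolving an abbreviated hex node ID to a
// revision number via the lookup above.
fn rev_from_hex_prefix(
    revlog: &Revlog,
    hex: &str,
) -> Result<Revision, RevlogError> {
    let prefix = NodePrefix::from_hex(hex)
        .map_err(|_| RevlogError::InvalidRevision)?;
    revlog.rev_from_node(prefix)
}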
+
+    /// Returns whether the given revision exists in this revlog.
+    pub fn has_rev(&self, rev: Revision) -> bool {
+        self.index.get_entry(rev).is_some()
+    }
+
+    /// Return the full data associated to a revision.
+    ///
+    /// All entries required to build the final data out of deltas will be
+    /// retrieved as needed, and the deltas will be applied to the initial
+    /// snapshot to rebuild the final data.
+    pub fn get_rev_data(
+        &self,
+        rev: Revision,
+    ) -> Result<Cow<[u8]>, RevlogError> {
+        if rev == NULL_REVISION {
+            return Ok(Cow::Borrowed(&[]));
+        };
+        Ok(self.get_entry(rev)?.data()?)
+    }
+
+    /// Check the hash of some given data against the recorded hash.
+    pub fn check_hash(
+        &self,
+        p1: Revision,
+        p2: Revision,
+        expected: &[u8],
+        data: &[u8],
+    ) -> bool {
+        let e1 = self.index.get_entry(p1);
+        let h1 = match e1 {
+            Some(ref entry) => entry.hash(),
+            None => &NULL_NODE,
+        };
+        let e2 = self.index.get_entry(p2);
+        let h2 = match e2 {
+            Some(ref entry) => entry.hash(),
+            None => &NULL_NODE,
+        };
+
+        hash(data, h1.as_bytes(), h2.as_bytes()) == expected
+    }
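// Editor's note: `hash` is defined elsewhere in this module; following
// Mercurial's node convention it is assumed to be SHA-1 over the two parent
// hashes in sorted order, then the revision data. Roughly:
fn hash_sketch(data: &[u8], p1_hash: &[u8], p2_hash: &[u8]) -> Vec<u8> {
    let mut hasher = Sha1::new();
    let (first, second) = if p1_hash < p2_hash {
        (p1_hash, p2_hash)
    } else {
        (p2_hash, p1_hash)
    };
    hasher.update(first);
    hasher.update(second);
    hasher.update(data);
    hasher.finalize().to_vec()
}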
+
+    /// Build the full data of a revision out of its snapshot
+    /// and its deltas.
+    fn build_data_from_deltas(
+        snapshot: RevlogEntry,
+        deltas: &[RevlogEntry],
+    ) -> Result<Vec<u8>, HgError> {
+        let snapshot = snapshot.data_chunk()?;
+        let deltas = deltas
+            .iter()
+            .rev()
+            .map(RevlogEntry::data_chunk)
+            .collect::<Result<Vec<_>, _>>()?;
+        let patches: Vec<_> =
+            deltas.iter().map(|d| patch::PatchList::new(d)).collect();
+        let patch = patch::fold_patch_lists(&patches);
+        Ok(patch.apply(&snapshot))
+    }
+
+    /// Return the revlog data.
+    fn data(&self) -> &[u8] {
+        match &self.data_bytes {
+            Some(data_bytes) => data_bytes,
+            None => panic!(
+                "forgot to load the data or trying to access inline data"
+            ),
+        }
+    }
+
+    pub fn make_null_entry(&self) -> RevlogEntry {
+        RevlogEntry {
+            revlog: self,
+            rev: NULL_REVISION,
+            bytes: b"",
+            compressed_len: 0,
+            uncompressed_len: 0,
+            base_rev_or_base_of_delta_chain: None,
+            p1: NULL_REVISION,
+            p2: NULL_REVISION,
+            flags: NULL_REVLOG_ENTRY_FLAGS,
+            hash: NULL_NODE,
+        }
+    }
+
+    /// Get an entry of the revlog.
+    pub fn get_entry(
+        &self,
+        rev: Revision,
+    ) -> Result<RevlogEntry, RevlogError> {
+        if rev == NULL_REVISION {
+            return Ok(self.make_null_entry());
+        }
+        let index_entry = self
+            .index
+            .get_entry(rev)
+            .ok_or(RevlogError::InvalidRevision)?;
+        let start = index_entry.offset();
+        let end = start + index_entry.compressed_len() as usize;
+        let data = if self.index.is_inline() {
+            self.index.data(start, end)
+        } else {
+            &self.data()[start..end]
+        };
+        let entry = RevlogEntry {
+            revlog: self,
+            rev,
+            bytes: data,
+            compressed_len: index_entry.compressed_len(),
+            uncompressed_len: index_entry.uncompressed_len(),
+            base_rev_or_base_of_delta_chain: if index_entry
+                .base_revision_or_base_of_delta_chain()
+                == rev
+            {
+                None
+            } else {
+                Some(index_entry.base_revision_or_base_of_delta_chain())
+            },
+            p1: index_entry.p1(),
+            p2: index_entry.p2(),
+            flags: index_entry.flags(),
+            hash: *index_entry.hash(),
+        };
+        Ok(entry)
+    }
+
+    /// When resolving internal references within the revlog, any errors
+    /// should be reported as corruption instead of e.g. "invalid revision".
+    fn get_entry_internal(
+        &self,
+        rev: Revision,
+    ) -> Result<RevlogEntry, HgError> {
+        self.get_entry(rev)
+            .map_err(|_| corrupted(format!("revision {} out of range", rev)))
+    }
+}
+
+/// The revlog entry's bytes and the necessary information to extract
+/// the entry's data.
+#[derive(Clone)]
+pub struct RevlogEntry<'a> {
+    revlog: &'a Revlog,
+    rev: Revision,
+    bytes: &'a [u8],
+    compressed_len: u32,
+    uncompressed_len: i32,
+    base_rev_or_base_of_delta_chain: Option<Revision>,
+    p1: Revision,
+    p2: Revision,
+    flags: u16,
+    hash: Node,
+}
+
+impl<'a> RevlogEntry<'a> {
+    pub fn revision(&self) -> Revision {
+        self.rev
+    }
+
+    pub fn node(&self) -> &Node {
+        &self.hash
+    }
+
+    pub fn uncompressed_len(&self) -> Option<u32> {
+        u32::try_from(self.uncompressed_len).ok()
+    }
+
+    pub fn has_p1(&self) -> bool {
+        self.p1 != NULL_REVISION
+    }
+
+    pub fn p1_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
+        if self.p1 == NULL_REVISION {
+            Ok(None)
+        } else {
+            Ok(Some(self.revlog.get_entry(self.p1)?))
+        }
+    }
+
+    pub fn p2_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
+        if self.p2 == NULL_REVISION {
+            Ok(None)
+        } else {
+            Ok(Some(self.revlog.get_entry(self.p2)?))
+        }
+    }
+
+    pub fn p1(&self) -> Option<Revision> {
+        if self.p1 == NULL_REVISION {
+            None
+        } else {
+            Some(self.p1)
+        }
+    }
+
+    pub fn p2(&self) -> Option<Revision> {
+        if self.p2 == NULL_REVISION {
+            None
+        } else {
+            Some(self.p2)
+        }
+    }
+
+    pub fn is_censored(&self) -> bool {
+        (self.flags & REVISION_FLAG_CENSORED) != 0
+    }
+
+    pub fn has_length_affecting_flag_processor(&self) -> bool {
+        // Relevant Python code: revlog.size()
+        // note: ELLIPSIS is known not to change the content
+        (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
+    }
+
+    /// The data for this entry, after resolving deltas if any.
+    pub fn rawdata(&self) -> Result<Cow<'a, [u8]>, HgError> {
+        let mut entry = self.clone();
+        let mut delta_chain = vec![];
+
+        // The meaning of `base_rev_or_base_of_delta_chain` depends on
+        // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
+        // `mercurial/revlogutils/constants.py` and the code in
+        // [_chaininfo] and in [index_deltachain].
+        let uses_generaldelta = self.revlog.index.uses_generaldelta();
+        while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
+            let base_rev = if uses_generaldelta {
+                base_rev
+            } else {
+                entry.rev - 1
+            };
+            delta_chain.push(entry);
+            entry = self.revlog.get_entry_internal(base_rev)?;
+        }
+
+        let data = if delta_chain.is_empty() {
+            entry.data_chunk()?
+        } else {
+            Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
+        };
+
+        Ok(data)
+    }
+
+    fn check_data(
+        &self,
+        data: Cow<'a, [u8]>,
+    ) -> Result<Cow<'a, [u8]>, HgError> {
+        if self.revlog.check_hash(
+            self.p1,
+            self.p2,
+            self.hash.as_bytes(),
+            &data,
+        ) {
+            Ok(data)
+        } else {
+            if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
+                return Err(HgError::unsupported(
+                    "ellipsis revisions are not supported by rhg",
+                ));
+            }
+            Err(corrupted(format!(
+                "hash check failed for revision {}",
+                self.rev
+            )))
+        }
+    }
+
+    pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
+        let data = self.rawdata()?;
+        if self.is_censored() {
+            return Err(HgError::CensoredNodeError);
+        }
+        self.check_data(data)
+    }
+
+    /// Extract the data contained in the entry.
+    /// This may be a delta. (See `is_delta`.)
+    fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
+        if self.bytes.is_empty() {
+            return Ok(Cow::Borrowed(&[]));
+        }
+        match self.bytes[0] {
+            // Revision data is the entirety of the entry, including this
+            // header.
+            b'\0' => Ok(Cow::Borrowed(self.bytes)),
+            // Raw revision data follows.
+            b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
+            // zlib (RFC 1950) data.
+            b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
+            // zstd data.
+            b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
+            // A proper new format should have had a repo/store requirement.
+            format_type => Err(corrupted(format!(
+                "unknown compression header '{}'",
+                format_type
+            ))),
+        }
+    }
+
+    fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
+        let mut decoder = ZlibDecoder::new(self.bytes);
+        if self.is_delta() {
+            let mut buf = Vec::with_capacity(self.compressed_len as usize);
+            decoder
+                .read_to_end(&mut buf)
+                .map_err(|e| corrupted(e.to_string()))?;
+            Ok(buf)
+        } else {
+            let cap = self.uncompressed_len.max(0) as usize;
+            let mut buf = vec![0; cap];
+            decoder
+                .read_exact(&mut buf)
+                .map_err(|e| corrupted(e.to_string()))?;
+            Ok(buf)
+        }
+    }
+
+    fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
+        if self.is_delta() {
+            let mut buf = Vec::with_capacity(self.compressed_len as usize);
+            zstd::stream::copy_decode(self.bytes, &mut buf)
+                .map_err(|e| corrupted(e.to_string()))?;
+            Ok(buf)
+        } else {
+            let cap = self.uncompressed_len.max(0) as usize;
+            let mut buf = vec![0; cap];
+            let len = zstd::bulk::decompress_to_buffer(self.bytes, &mut buf)
+                .map_err(|e| corrupted(e.to_string()))?;
+            if len != self.uncompressed_len as usize {
+                Err(corrupted("uncompressed length does not match"))
+            } else {
+                Ok(buf)
+            }
+        }
+    }
+
+    /// Tell whether the entry is a snapshot or a delta
+    /// (this influences decompression).
+    fn is_delta(&self) -> bool {
+        self.base_rev_or_base_of_delta_chain.is_some()
+    }
+}
+
+/// Calculate the hash of a revision given its data and its parents.
+fn hash(
+    data: &[u8],
+    p1_hash: &[u8],
+    p2_hash: &[u8],
+) -> [u8; NODE_BYTES_LENGTH] {
+    let mut hasher = Sha1::new();
+    let (a, b) = (p1_hash, p2_hash);
+    if a > b {
+        hasher.update(b);
+        hasher.update(a);
+    } else {
+        hasher.update(a);
+        hasher.update(b);
+    }
+    hasher.update(data);
+    *hasher.finalize().as_ref()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
+    use itertools::Itertools;
+
+    #[test]
+    fn test_empty() {
+        let temp = tempfile::tempdir().unwrap();
+        let vfs = Vfs { base: temp.path() };
+        std::fs::write(temp.path().join("foo.i"), b"").unwrap();
+        let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
+        assert!(revlog.is_empty());
+        assert_eq!(revlog.len(), 0);
+        assert!(revlog.get_entry(0).is_err());
+        assert!(!revlog.has_rev(0));
+    }
+
+    #[test]
+    fn test_inline() {
+        let temp = tempfile::tempdir().unwrap();
+        let vfs = Vfs { base: temp.path() };
+        let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
+            .unwrap();
+        let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
+            .unwrap();
+        let node2 = Node::from_hex("dd6ad206e907be60927b5a3117b97dffb2590582")
+            .unwrap();
+        let entry0_bytes = IndexEntryBuilder::new()
+            .is_first(true)
+            .with_version(1)
+            .with_inline(true)
+            .with_offset(INDEX_ENTRY_SIZE)
+            .with_node(node0)
+            .build();
+        let entry1_bytes = IndexEntryBuilder::new()
+            .with_offset(INDEX_ENTRY_SIZE)
+            .with_node(node1)
+            .build();
+        let entry2_bytes = IndexEntryBuilder::new()
+            .with_offset(INDEX_ENTRY_SIZE)
+            .with_p1(0)
+            .with_p2(1)
+            .with_node(node2)
+            .build();
+        let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
+            .into_iter()
+            .flatten()
+            .collect_vec();
+        std::fs::write(temp.path().join("foo.i"), contents).unwrap();
+        let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
+
+        let entry0 = revlog.get_entry(0).ok().unwrap();
+        assert_eq!(entry0.revision(), 0);
+        assert_eq!(*entry0.node(), node0);
+        assert!(!entry0.has_p1());
+        assert_eq!(entry0.p1(), None);
+        assert_eq!(entry0.p2(), None);
+        let p1_entry = entry0.p1_entry().unwrap();
+        assert!(p1_entry.is_none());
+        let p2_entry = entry0.p2_entry().unwrap();
+        assert!(p2_entry.is_none());
+
+        let entry1 = revlog.get_entry(1).ok().unwrap();
+        assert_eq!(entry1.revision(), 1);
+        assert_eq!(*entry1.node(), node1);
+        assert!(!entry1.has_p1());
+        assert_eq!(entry1.p1(), None);
+        assert_eq!(entry1.p2(), None);
+        let p1_entry = entry1.p1_entry().unwrap();
+        assert!(p1_entry.is_none());
+        let p2_entry = entry1.p2_entry().unwrap();
+        assert!(p2_entry.is_none());
+
+        let entry2 = revlog.get_entry(2).ok().unwrap();
+        assert_eq!(entry2.revision(), 2);
+        assert_eq!(*entry2.node(), node2);
+        assert!(entry2.has_p1());
+        assert_eq!(entry2.p1(), Some(0));
+        assert_eq!(entry2.p2(), Some(1));
+        let p1_entry = entry2.p1_entry().unwrap();
+        assert!(p1_entry.is_some());
+        assert_eq!(p1_entry.unwrap().revision(), 0);
+        let p2_entry = entry2.p2_entry().unwrap();
+        assert!(p2_entry.is_some());
+        assert_eq!(p2_entry.unwrap().revision(), 1);
+    }
+}
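
A note on the decompression path in `data_chunk` above: the first byte of each on-disk chunk selects the compression scheme, and `0x28` is simply the leading byte of the little-endian zstd frame magic. The call to `zstd::bulk::decompress_to_buffer` (the deleted file further below still uses the older `zstd::block` module) appears to track the zstd crate's renaming of its one-shot API. A minimal sketch of the header dispatch, with a hypothetical `Chunk` enum standing in for the real control flow:

```rust
// Hypothetical classification mirroring `data_chunk` above: the first
// byte of a chunk names its encoding.
enum Chunk<'a> {
    Plain(&'a [u8]), // b'\0': the whole chunk, header byte included, is the data
    Raw(&'a [u8]),   // b'u': raw data follows the header byte
    Zlib(&'a [u8]),  // b'x': an RFC 1950 zlib stream
    Zstd(&'a [u8]),  // 0x28: first byte of the zstd frame magic
}

fn classify(chunk: &[u8]) -> Option<Chunk<'_>> {
    match *chunk.first()? {
        b'\0' => Some(Chunk::Plain(chunk)),
        b'u' => Some(Chunk::Raw(&chunk[1..])),
        b'x' => Some(Chunk::Zlib(chunk)),
        0x28 => Some(Chunk::Zstd(chunk)),
        _ => None, // unknown header: reported as corruption above
    }
}

fn main() {
    assert!(matches!(classify(b"x\x9c..."), Some(Chunk::Zlib(_))));
}
```
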
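Similarly, the `hash` function near the end of the new file feeds the two parent hashes to SHA-1 in sorted order, which makes a node id independent of which parent is p1 and which is p2. A small demonstration of that property (assuming the `sha1` crate already used by this file; `node_id` is a hypothetical name):

```rust
use sha1::{Digest, Sha1};

// Recompute a node id the way `hash` above does: smaller parent hash
// first, then the larger one, then the revision data.
fn node_id(data: &[u8], p1: &[u8; 20], p2: &[u8; 20]) -> [u8; 20] {
    let (a, b) = if p1 > p2 { (p2, p1) } else { (p1, p2) };
    let mut hasher = Sha1::new();
    hasher.update(a);
    hasher.update(b);
    hasher.update(data);
    hasher.finalize().into()
}

fn main() {
    let (p1, p2) = ([0x11u8; 20], [0x22u8; 20]);
    // Swapping the parents does not change the resulting node id.
    assert_eq!(node_id(b"data", &p1, &p2), node_id(b"data", &p2, &p1));
}
```
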
--- a/rust/hg-core/src/revlog/node.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/node.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -10,7 +10,6 @@
 
 use crate::errors::HgError;
 use bytes_cast::BytesCast;
-use std::convert::{TryFrom, TryInto};
 use std::fmt;
 
 /// The length in bytes of a `Node`
@@ -315,7 +314,7 @@
 
 impl PartialEq<Node> for NodePrefix {
     fn eq(&self, other: &Node) -> bool {
-        Self::from(*other) == *self
+        self.data == other.data && self.nybbles_len() == other.nybbles_len()
     }
 }
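
The new `PartialEq` body above compares the prefix's backing bytes and nybble count against the node directly, instead of first converting the full `Node` into a temporary `NodePrefix`, presumably to sidestep that conversion. A sketch of the pattern with hypothetical stand-in types (not the real hg-core definitions):

```rust
// Hypothetical stand-ins for Node/NodePrefix, to illustrate comparing
// fields directly in a cross-type PartialEq impl.
struct Node {
    data: [u8; 20],
}

struct NodePrefix {
    data: [u8; 20], // unused trailing bytes are zeroed
    nybbles: usize,
}

impl Node {
    fn nybbles_len(&self) -> usize {
        self.data.len() * 2 // a full node is always 40 nybbles
    }
}

impl PartialEq<Node> for NodePrefix {
    fn eq(&self, other: &Node) -> bool {
        // Equal only when the prefix is full-length and the bytes match;
        // no temporary NodePrefix is built from `other`.
        self.nybbles == other.nybbles_len() && self.data == other.data
    }
}

fn main() {
    let node = Node { data: [7; 20] };
    let prefix = NodePrefix { data: [7; 20], nybbles: 40 };
    assert!(prefix == node);
}
```
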
 
--- a/rust/hg-core/src/revlog/nodemap.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/nodemap.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -71,7 +71,7 @@
     ///
     /// If several Revisions match the given prefix, a [`MultipleResults`]
     /// error is returned.
-    fn find_bin<'a>(
+    fn find_bin(
         &self,
         idx: &impl RevlogIndex,
         prefix: NodePrefix,
@@ -88,7 +88,7 @@
     ///
     /// If several Revisions match the given prefix, a [`MultipleResults`]
     /// error is returned.
-    fn unique_prefix_len_bin<'a>(
+    fn unique_prefix_len_bin(
         &self,
         idx: &impl RevlogIndex,
         node_prefix: NodePrefix,
@@ -249,7 +249,7 @@
     rev: Revision,
 ) -> Result<Option<Revision>, NodeMapError> {
     idx.node(rev)
-        .ok_or_else(|| NodeMapError::RevisionNotInIndex(rev))
+        .ok_or(NodeMapError::RevisionNotInIndex(rev))
         .map(|node| {
             if prefix.is_prefix_of(node) {
                 Some(rev)
@@ -468,7 +468,7 @@
         if let Element::Rev(old_rev) = deepest.element {
             let old_node = index
                 .node(old_rev)
-                .ok_or_else(|| NodeMapError::RevisionNotInIndex(old_rev))?;
+                .ok_or(NodeMapError::RevisionNotInIndex(old_rev))?;
             if old_node == node {
                 return Ok(()); // avoid creating lots of useless blocks
             }
@@ -865,7 +865,7 @@
             hex: &str,
         ) -> Result<(), NodeMapError> {
             let node = pad_node(hex);
-            self.index.insert(rev, node.clone());
+            self.index.insert(rev, node);
             self.nt.insert(&self.index, &node, rev)?;
             Ok(())
         }
@@ -887,13 +887,13 @@
         /// Drain `added` and restart a new one
         fn commit(self) -> Self {
             let mut as_vec: Vec<Block> =
-                self.nt.readonly.iter().map(|block| block.clone()).collect();
+                self.nt.readonly.iter().copied().collect();
             as_vec.extend(self.nt.growable);
             as_vec.push(self.nt.root);
 
             Self {
                 index: self.index,
-                nt: NodeTree::from(as_vec).into(),
+                nt: NodeTree::from(as_vec),
             }
         }
     }
@@ -967,15 +967,15 @@
         let idx = &mut nt_idx.index;
 
         let node0_hex = hex_pad_right("444444");
-        let mut node1_hex = hex_pad_right("444444").clone();
+        let mut node1_hex = hex_pad_right("444444");
         node1_hex.pop();
         node1_hex.push('5');
         let node0 = Node::from_hex(&node0_hex).unwrap();
         let node1 = Node::from_hex(&node1_hex).unwrap();
 
-        idx.insert(0, node0.clone());
+        idx.insert(0, node0);
         nt.insert(idx, &node0, 0)?;
-        idx.insert(1, node1.clone());
+        idx.insert(1, node1);
         nt.insert(idx, &node1, 1)?;
 
         assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(0));
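
The `ok_or_else` to `ok_or` changes above look like fixes for clippy's `unnecessary_lazy_evaluations` lint: when the error value is trivially cheap to construct, the closure buys nothing. A small illustration under that assumption (`MapError` and `lookup` are hypothetical):

```rust
#[derive(Debug, PartialEq)]
enum MapError {
    RevisionNotInIndex(i32),
}

// Cheap error value: eager construction via `ok_or` is fine. The closure
// form only pays off when building the error does real work, e.g.
// formatting a String or allocating.
fn lookup(node: Option<i32>, rev: i32) -> Result<i32, MapError> {
    node.ok_or(MapError::RevisionNotInIndex(rev))
}

fn main() {
    assert_eq!(lookup(None, 7), Err(MapError::RevisionNotInIndex(7)));
    assert_eq!(lookup(Some(42), 7), Ok(42));
}
```
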
--- a/rust/hg-core/src/revlog/nodemap_docket.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/nodemap_docket.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -3,7 +3,6 @@
 use memmap2::Mmap;
 use std::path::{Path, PathBuf};
 
-use crate::utils::strip_suffix;
 use crate::vfs::Vfs;
 
 const ONDISK_VERSION: u8 = 1;
@@ -97,8 +96,9 @@
         .expect("expected a base name")
         .to_str()
         .expect("expected an ASCII file name in the store");
-    let prefix = strip_suffix(docket_name, ".n.a")
-        .or_else(|| strip_suffix(docket_name, ".n"))
+    let prefix = docket_name
+        .strip_suffix(".n.a")
+        .or_else(|| docket_name.strip_suffix(".n"))
         .expect("expected docket path in .n or .n.a");
     let name = format!("{}-{}.nd", prefix, uid);
     docket_path
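
The custom `strip_suffix` helper this hunk drops predates `str::strip_suffix`, which has been part of the standard library since Rust 1.45; the chained fallback above resolves the docket name whichever of the two extensions it carries. A standalone sketch (`docket_prefix` is a hypothetical name):

```rust
// Resolve the docket prefix by trying the longer suffix first, mirroring
// the `.n.a` / `.n` fallback above.
fn docket_prefix(name: &str) -> Option<&str> {
    name.strip_suffix(".n.a")
        .or_else(|| name.strip_suffix(".n"))
}

fn main() {
    assert_eq!(docket_prefix("00changelog.n"), Some("00changelog"));
    assert_eq!(docket_prefix("00changelog.n.a"), Some("00changelog"));
    assert_eq!(docket_prefix("00changelog.i"), None);
}
```
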
--- a/rust/hg-core/src/revlog/path_encode.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revlog/path_encode.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -2,6 +2,7 @@
 
 #[derive(PartialEq, Debug)]
 #[allow(non_camel_case_types)]
+#[allow(clippy::upper_case_acronyms)]
 enum path_state {
     START, /* first byte of a path component */
     A,     /* "AUX" */
@@ -27,6 +28,7 @@
 
 /* state machine for dir-encoding */
 #[allow(non_camel_case_types)]
+#[allow(clippy::upper_case_acronyms)]
 enum dir_state {
     DDOT,
     DH,
@@ -34,65 +36,104 @@
     DDEFAULT,
 }
 
+trait Sink {
+    fn write_byte(&mut self, c: u8);
+    fn write_bytes(&mut self, c: &[u8]);
+}
+
 fn inset(bitset: &[u32; 8], c: u8) -> bool {
     bitset[(c as usize) >> 5] & (1 << (c & 31)) != 0
 }
 
-fn charcopy(dest: Option<&mut [u8]>, destlen: &mut usize, c: u8) {
-    if let Some(slice) = dest {
-        slice[*destlen] = c
-    }
-    *destlen += 1
+const MAXENCODE: usize = 4096 * 4;
+
+struct DestArr<const N: usize> {
+    buf: [u8; N],
+    pub len: usize,
 }
 
-fn memcopy(dest: Option<&mut [u8]>, destlen: &mut usize, src: &[u8]) {
-    if let Some(slice) = dest {
-        slice[*destlen..*destlen + src.len()].copy_from_slice(src)
+impl<const N: usize> DestArr<N> {
+    pub fn create() -> Self {
+        DestArr {
+            buf: [0; N],
+            len: 0,
+        }
     }
-    *destlen += src.len();
+
+    pub fn contents(&self) -> &[u8] {
+        &self.buf[..self.len]
+    }
 }
 
-fn rewrap_option<'a, 'b: 'a>(
-    x: &'a mut Option<&'b mut [u8]>,
-) -> Option<&'a mut [u8]> {
-    match x {
-        None => None,
-        Some(y) => Some(y),
+impl<const N: usize> Sink for DestArr<N> {
+    fn write_byte(&mut self, c: u8) {
+        self.buf[self.len] = c;
+        self.len += 1;
+    }
+
+    fn write_bytes(&mut self, src: &[u8]) {
+        self.buf[self.len..self.len + src.len()].copy_from_slice(src);
+        self.len += src.len();
     }
 }
 
-fn hexencode<'a>(mut dest: Option<&'a mut [u8]>, destlen: &mut usize, c: u8) {
+struct MeasureDest {
+    pub len: usize,
+}
+
+impl Sink for Vec<u8> {
+    fn write_byte(&mut self, c: u8) {
+        self.push(c)
+    }
+
+    fn write_bytes(&mut self, src: &[u8]) {
+        self.extend_from_slice(src)
+    }
+}
+
+impl MeasureDest {
+    fn create() -> Self {
+        Self { len: 0 }
+    }
+}
+
+impl Sink for MeasureDest {
+    fn write_byte(&mut self, _c: u8) {
+        self.len += 1;
+    }
+
+    fn write_bytes(&mut self, src: &[u8]) {
+        self.len += src.len();
+    }
+}
+
+fn hexencode(dest: &mut impl Sink, c: u8) {
     let hexdigit = b"0123456789abcdef";
-    charcopy(
-        rewrap_option(&mut dest),
-        destlen,
-        hexdigit[(c as usize) >> 4],
-    );
-    charcopy(dest, destlen, hexdigit[(c as usize) & 15]);
+    dest.write_byte(hexdigit[(c as usize) >> 4]);
+    dest.write_byte(hexdigit[(c as usize) & 15]);
 }
 
 /* 3-byte escape: tilde followed by two hex digits */
-fn escape3(mut dest: Option<&mut [u8]>, destlen: &mut usize, c: u8) {
-    charcopy(rewrap_option(&mut dest), destlen, b'~');
-    hexencode(dest, destlen, c);
+fn escape3(dest: &mut impl Sink, c: u8) {
+    dest.write_byte(b'~');
+    hexencode(dest, c);
 }
 
-fn encode_dir(mut dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+fn encode_dir(dest: &mut impl Sink, src: &[u8]) {
     let mut state = dir_state::DDEFAULT;
     let mut i = 0;
-    let mut destlen = 0;
 
     while i < src.len() {
         match state {
             dir_state::DDOT => match src[i] {
                 b'd' | b'i' => {
                     state = dir_state::DHGDI;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'h' => {
                     state = dir_state::DH;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 _ => {
@@ -102,7 +143,7 @@
             dir_state::DH => {
                 if src[i] == b'g' {
                     state = dir_state::DHGDI;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 } else {
                     state = dir_state::DDEFAULT;
@@ -110,8 +151,8 @@
             }
             dir_state::DHGDI => {
                 if src[i] == b'/' {
-                    memcopy(rewrap_option(&mut dest), &mut destlen, b".hg");
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_bytes(b".hg");
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 state = dir_state::DDEFAULT;
@@ -120,66 +161,64 @@
                 if src[i] == b'.' {
                     state = dir_state::DDOT
                 }
-                charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                dest.write_byte(src[i]);
                 i += 1;
             }
         }
     }
-    destlen
 }
 
 fn _encode(
     twobytes: &[u32; 8],
     onebyte: &[u32; 8],
-    mut dest: Option<&mut [u8]>,
+    dest: &mut impl Sink,
     src: &[u8],
     encodedir: bool,
-) -> usize {
+) {
     let mut state = path_state::START;
     let mut i = 0;
-    let mut destlen = 0;
     let len = src.len();
 
     while i < len {
         match state {
             path_state::START => match src[i] {
                 b'/' => {
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'.' => {
                     state = path_state::LDOT;
-                    escape3(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    escape3(dest, src[i]);
                     i += 1;
                 }
                 b' ' => {
                     state = path_state::DEFAULT;
-                    escape3(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    escape3(dest, src[i]);
                     i += 1;
                 }
                 b'a' => {
                     state = path_state::A;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'c' => {
                     state = path_state::C;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'l' => {
                     state = path_state::L;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'n' => {
                     state = path_state::N;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'p' => {
                     state = path_state::P;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 _ => {
@@ -189,7 +228,7 @@
             path_state::A => {
                 if src[i] == b'u' {
                     state = path_state::AU;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 } else {
                     state = path_state::DEFAULT;
@@ -206,18 +245,14 @@
             path_state::THIRD => {
                 state = path_state::DEFAULT;
                 match src[i] {
-                    b'.' | b'/' | b'\0' => escape3(
-                        rewrap_option(&mut dest),
-                        &mut destlen,
-                        src[i - 1],
-                    ),
+                    b'.' | b'/' | b'\0' => escape3(dest, src[i - 1]),
                     _ => i -= 1,
                 }
             }
             path_state::C => {
                 if src[i] == b'o' {
                     state = path_state::CO;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 } else {
                     state = path_state::DEFAULT;
@@ -240,41 +275,25 @@
                     i += 1;
                 } else {
                     state = path_state::DEFAULT;
-                    charcopy(
-                        rewrap_option(&mut dest),
-                        &mut destlen,
-                        src[i - 1],
-                    );
+                    dest.write_byte(src[i - 1]);
                 }
             }
             path_state::COMLPTn => {
                 state = path_state::DEFAULT;
                 match src[i] {
                     b'.' | b'/' | b'\0' => {
-                        escape3(
-                            rewrap_option(&mut dest),
-                            &mut destlen,
-                            src[i - 2],
-                        );
-                        charcopy(
-                            rewrap_option(&mut dest),
-                            &mut destlen,
-                            src[i - 1],
-                        );
+                        escape3(dest, src[i - 2]);
+                        dest.write_byte(src[i - 1]);
                     }
                     _ => {
-                        memcopy(
-                            rewrap_option(&mut dest),
-                            &mut destlen,
-                            &src[i - 2..i],
-                        );
+                        dest.write_bytes(&src[i - 2..i]);
                     }
                 }
             }
             path_state::L => {
                 if src[i] == b'p' {
                     state = path_state::LP;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 } else {
                     state = path_state::DEFAULT;
@@ -291,7 +310,7 @@
             path_state::N => {
                 if src[i] == b'u' {
                     state = path_state::NU;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 } else {
                     state = path_state::DEFAULT;
@@ -308,7 +327,7 @@
             path_state::P => {
                 if src[i] == b'r' {
                     state = path_state::PR;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 } else {
                     state = path_state::DEFAULT;
@@ -325,12 +344,12 @@
             path_state::LDOT => match src[i] {
                 b'd' | b'i' => {
                     state = path_state::HGDI;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'h' => {
                     state = path_state::H;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 _ => {
@@ -340,30 +359,30 @@
             path_state::DOT => match src[i] {
                 b'/' | b'\0' => {
                     state = path_state::START;
-                    memcopy(rewrap_option(&mut dest), &mut destlen, b"~2e");
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_bytes(b"~2e");
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'd' | b'i' => {
                     state = path_state::HGDI;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, b'.');
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(b'.');
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 b'h' => {
                     state = path_state::H;
-                    memcopy(rewrap_option(&mut dest), &mut destlen, b".h");
+                    dest.write_bytes(b".h");
                     i += 1;
                 }
                 _ => {
                     state = path_state::DEFAULT;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, b'.');
+                    dest.write_byte(b'.');
                 }
             },
             path_state::H => {
                 if src[i] == b'g' {
                     state = path_state::HGDI;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 } else {
                     state = path_state::DEFAULT;
@@ -373,13 +392,9 @@
                 if src[i] == b'/' {
                     state = path_state::START;
                     if encodedir {
-                        memcopy(
-                            rewrap_option(&mut dest),
-                            &mut destlen,
-                            b".hg",
-                        );
+                        dest.write_bytes(b".hg");
                     }
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1
                 } else {
                     state = path_state::DEFAULT;
@@ -388,18 +403,18 @@
             path_state::SPACE => match src[i] {
                 b'/' | b'\0' => {
                     state = path_state::START;
-                    memcopy(rewrap_option(&mut dest), &mut destlen, b"~20");
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_bytes(b"~20");
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 _ => {
                     state = path_state::DEFAULT;
-                    charcopy(rewrap_option(&mut dest), &mut destlen, b' ');
+                    dest.write_byte(b' ');
                 }
             },
             path_state::DEFAULT => {
                 while i != len && inset(onebyte, src[i]) {
-                    charcopy(rewrap_option(&mut dest), &mut destlen, src[i]);
+                    dest.write_byte(src[i]);
                     i += 1;
                 }
                 if i == len {
@@ -416,17 +431,13 @@
                     }
                     b'/' => {
                         state = path_state::START;
-                        charcopy(rewrap_option(&mut dest), &mut destlen, b'/');
+                        dest.write_byte(b'/');
                         i += 1;
                     }
                     _ => {
                         if inset(onebyte, src[i]) {
                             loop {
-                                charcopy(
-                                    rewrap_option(&mut dest),
-                                    &mut destlen,
-                                    src[i],
-                                );
+                                dest.write_byte(src[i]);
                                 i += 1;
                                 if !(i < len && inset(onebyte, src[i])) {
                                     break;
@@ -435,22 +446,14 @@
                         } else if inset(twobytes, src[i]) {
                             let c = src[i];
                             i += 1;
-                            charcopy(
-                                rewrap_option(&mut dest),
-                                &mut destlen,
-                                b'_',
-                            );
-                            charcopy(
-                                rewrap_option(&mut dest),
-                                &mut destlen,
-                                if c == b'_' { b'_' } else { c + 32 },
-                            );
+                            dest.write_byte(b'_');
+                            dest.write_byte(if c == b'_' {
+                                b'_'
+                            } else {
+                                c + 32
+                            });
                         } else {
-                            escape3(
-                                rewrap_option(&mut dest),
-                                &mut destlen,
-                                src[i],
-                            );
+                            escape3(dest, src[i]);
                             i += 1;
                         }
                     }
@@ -462,17 +465,13 @@
         path_state::START => (),
         path_state::A => (),
         path_state::AU => (),
-        path_state::THIRD => {
-            escape3(rewrap_option(&mut dest), &mut destlen, src[i - 1])
-        }
+        path_state::THIRD => escape3(dest, src[i - 1]),
         path_state::C => (),
         path_state::CO => (),
-        path_state::COMLPT => {
-            charcopy(rewrap_option(&mut dest), &mut destlen, src[i - 1])
-        }
+        path_state::COMLPT => dest.write_byte(src[i - 1]),
         path_state::COMLPTn => {
-            escape3(rewrap_option(&mut dest), &mut destlen, src[i - 2]);
-            charcopy(rewrap_option(&mut dest), &mut destlen, src[i - 1]);
+            escape3(dest, src[i - 2]);
+            dest.write_byte(src[i - 1]);
         }
         path_state::L => (),
         path_state::LP => (),
@@ -482,19 +481,18 @@
         path_state::PR => (),
         path_state::LDOT => (),
         path_state::DOT => {
-            memcopy(rewrap_option(&mut dest), &mut destlen, b"~2e");
+            dest.write_bytes(b"~2e");
         }
         path_state::H => (),
         path_state::HGDI => (),
         path_state::SPACE => {
-            memcopy(rewrap_option(&mut dest), &mut destlen, b"~20");
+            dest.write_bytes(b"~20");
         }
         path_state::DEFAULT => (),
-    };
-    destlen
+    }
 }
 
-fn basic_encode(dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+fn basic_encode(dest: &mut impl Sink, src: &[u8]) {
     let twobytes: [u32; 8] = [0, 0, 0x87ff_fffe, 0, 0, 0, 0, 0];
     let onebyte: [u32; 8] =
         [1, 0x2bff_3bfa, 0x6800_0001, 0x2fff_ffff, 0, 0, 0, 0];
@@ -503,24 +501,22 @@
 
 const MAXSTOREPATHLEN: usize = 120;
 
-fn lower_encode(mut dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+fn lower_encode(dest: &mut impl Sink, src: &[u8]) {
     let onebyte: [u32; 8] =
         [1, 0x2bff_fbfb, 0xe800_0001, 0x2fff_ffff, 0, 0, 0, 0];
     let lower: [u32; 8] = [0, 0, 0x07ff_fffe, 0, 0, 0, 0, 0];
-    let mut destlen = 0;
     for c in src {
         if inset(&onebyte, *c) {
-            charcopy(rewrap_option(&mut dest), &mut destlen, *c)
+            dest.write_byte(*c)
         } else if inset(&lower, *c) {
-            charcopy(rewrap_option(&mut dest), &mut destlen, *c + 32)
+            dest.write_byte(*c + 32)
         } else {
-            escape3(rewrap_option(&mut dest), &mut destlen, *c)
+            escape3(dest, *c)
         }
     }
-    destlen
 }
 
-fn aux_encode(dest: Option<&mut [u8]>, src: &[u8]) -> usize {
+fn aux_encode(dest: &mut impl Sink, src: &[u8]) {
     let twobytes = [0; 8];
     let onebyte: [u32; 8] = [!0, 0xffff_3ffe, !0, !0, !0, !0, !0, !0];
     _encode(&twobytes, &onebyte, dest, src, false)
@@ -529,118 +525,98 @@
 fn hash_mangle(src: &[u8], sha: &[u8]) -> Vec<u8> {
     let dirprefixlen = 8;
     let maxshortdirslen = 68;
-    let mut destlen = 0;
 
     let last_slash = src.iter().rposition(|b| *b == b'/');
-    let last_dot: Option<usize> = {
-        let s = last_slash.unwrap_or(0);
-        src[s..]
-            .iter()
-            .rposition(|b| *b == b'.')
-            .and_then(|i| Some(i + s))
+    let basename_start = match last_slash {
+        Some(slash) => slash + 1,
+        None => 0,
+    };
+    let basename = &src[basename_start..];
+    let ext = match basename.iter().rposition(|b| *b == b'.') {
+        None => &[],
+        Some(dot) => &basename[dot..],
     };
 
-    let mut dest = vec![0; MAXSTOREPATHLEN];
-    memcopy(Some(&mut dest), &mut destlen, b"dh/");
+    let mut dest = Vec::with_capacity(MAXSTOREPATHLEN);
+    dest.write_bytes(b"dh/");
 
-    {
-        let mut first = true;
-        for slice in src[..last_slash.unwrap_or_else(|| src.len())]
-            .split(|b| *b == b'/')
-        {
+    if let Some(last_slash) = last_slash {
+        for slice in src[..last_slash].split(|b| *b == b'/') {
             let slice = &slice[..std::cmp::min(slice.len(), dirprefixlen)];
-            if destlen + (slice.len() + if first { 0 } else { 1 })
-                > maxshortdirslen + 3
-            {
+            if dest.len() + slice.len() > maxshortdirslen + 3 {
                 break;
             } else {
-                if !first {
-                    charcopy(Some(&mut dest), &mut destlen, b'/')
-                };
-                memcopy(Some(&mut dest), &mut destlen, slice);
-                if dest[destlen - 1] == b'.' || dest[destlen - 1] == b' ' {
-                    dest[destlen - 1] = b'_'
-                }
+                dest.write_bytes(slice);
             }
-            first = false;
-        }
-        if !first {
-            charcopy(Some(&mut dest), &mut destlen, b'/');
+            dest.write_byte(b'/');
         }
     }
 
-    let used = destlen + 40 + {
-        if let Some(l) = last_dot {
-            src.len() - l
-        } else {
-            0
-        }
-    };
+    let used = dest.len() + 40 + ext.len();
 
     if MAXSTOREPATHLEN > used {
         let slop = MAXSTOREPATHLEN - used;
-        let basenamelen = match last_slash {
-            Some(l) => src.len() - l - 1,
-            None => src.len(),
-        };
-        let basenamelen = std::cmp::min(basenamelen, slop);
-        if basenamelen > 0 {
-            let start = match last_slash {
-                Some(l) => l + 1,
-                None => 0,
-            };
-            memcopy(
-                Some(&mut dest),
-                &mut destlen,
-                &src[start..][..basenamelen],
-            )
-        }
+        let len = std::cmp::min(basename.len(), slop);
+        dest.write_bytes(&basename[..len])
     }
     for c in sha {
-        hexencode(Some(&mut dest), &mut destlen, *c);
-    }
-    if let Some(l) = last_dot {
-        memcopy(Some(&mut dest), &mut destlen, &src[l..]);
+        hexencode(&mut dest, *c);
     }
-    if destlen == dest.len() {
-        dest
-    } else {
-        // sometimes the path are shorter than MAXSTOREPATHLEN
-        dest[..destlen].to_vec()
-    }
+    dest.write_bytes(ext);
+    dest.shrink_to_fit();
+    dest
 }
 
-const MAXENCODE: usize = 4096 * 4;
 fn hash_encode(src: &[u8]) -> Vec<u8> {
-    let dired = &mut [0; MAXENCODE];
-    let lowered = &mut [0; MAXENCODE];
-    let auxed = &mut [0; MAXENCODE];
+    let mut dired: DestArr<MAXENCODE> = DestArr::create();
+    let mut lowered: DestArr<MAXENCODE> = DestArr::create();
+    let mut auxed: DestArr<MAXENCODE> = DestArr::create();
     let baselen = (src.len() - 5) * 3;
     if baselen >= MAXENCODE {
         panic!("path_encode::hash_encore: string too long: {}", baselen)
     };
-    let dirlen = encode_dir(Some(&mut dired[..]), src);
-    let sha = Sha1::digest(&dired[..dirlen]);
-    let lowerlen = lower_encode(Some(&mut lowered[..]), &dired[..dirlen][5..]);
-    let auxlen = aux_encode(Some(&mut auxed[..]), &lowered[..lowerlen]);
-    hash_mangle(&auxed[..auxlen], &sha)
+    encode_dir(&mut dired, src);
+    let sha = Sha1::digest(dired.contents());
+    lower_encode(&mut lowered, &dired.contents()[5..]);
+    aux_encode(&mut auxed, lowered.contents());
+    hash_mangle(auxed.contents(), &sha)
 }
 
 pub fn path_encode(path: &[u8]) -> Vec<u8> {
     let newlen = if path.len() <= MAXSTOREPATHLEN {
-        basic_encode(None, path)
+        let mut measure = MeasureDest::create();
+        basic_encode(&mut measure, path);
+        measure.len
     } else {
-        MAXSTOREPATHLEN + 1
+        return hash_encode(path);
     };
     if newlen <= MAXSTOREPATHLEN {
         if newlen == path.len() {
             path.to_vec()
         } else {
-            let mut res = vec![0; newlen];
-            basic_encode(Some(&mut res), path);
-            res
+            let mut dest = Vec::with_capacity(newlen);
+            basic_encode(&mut dest, path);
+            assert!(dest.len() == newlen);
+            dest
         }
     } else {
-        hash_encode(&path)
+        hash_encode(path)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::utils::hg_path::HgPathBuf;
+
+    #[test]
+    fn test_long_filename_at_root() {
+        let input = b"data/ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ.i";
+        let expected = b"dh/abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij.i708243a2237a7afae259ea3545a72a2ef11c247b.i";
+        let res = path_encode(input);
+        assert_eq!(
+            HgPathBuf::from_bytes(&res),
+            HgPathBuf::from_bytes(expected)
+        );
+    }
+}
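
The refactor above retires the old `Option<&mut [u8]>` plus running `destlen` convention, where passing `None` turned every encoder into a length counter, in favor of a `Sink` trait: `DestArr` writes into a fixed stack buffer, `Vec<u8>` grows as needed, and `MeasureDest` merely counts. A minimal sketch of the same measure-then-write pattern (the doubling encoder is hypothetical, standing in for `basic_encode`):

```rust
// The same generic encoding routine drives either a counting sink or a
// real buffer, so the measuring pass and the writing pass share one body.
trait Sink {
    fn write_byte(&mut self, c: u8);
    fn write_bytes(&mut self, src: &[u8]);
}

struct Measure {
    len: usize,
}

impl Sink for Measure {
    fn write_byte(&mut self, _c: u8) {
        self.len += 1;
    }
    fn write_bytes(&mut self, src: &[u8]) {
        self.len += src.len();
    }
}

impl Sink for Vec<u8> {
    fn write_byte(&mut self, c: u8) {
        self.push(c)
    }
    fn write_bytes(&mut self, src: &[u8]) {
        self.extend_from_slice(src)
    }
}

// Hypothetical encoder: doubles every input byte.
fn encode(dest: &mut impl Sink, src: &[u8]) {
    for &c in src {
        dest.write_byte(c);
        dest.write_byte(c);
    }
}

fn main() {
    let mut measure = Measure { len: 0 };
    encode(&mut measure, b"abc");
    let mut out = Vec::with_capacity(measure.len); // exact allocation
    encode(&mut out, b"abc");
    assert_eq!(out.len(), measure.len);
}
```
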
--- a/rust/hg-core/src/revlog/revlog.rs	Thu Mar 02 15:21:36 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,644 +0,0 @@
-use std::borrow::Cow;
-use std::convert::TryFrom;
-use std::io::Read;
-use std::ops::Deref;
-use std::path::Path;
-
-use flate2::read::ZlibDecoder;
-use sha1::{Digest, Sha1};
-use zstd;
-
-use super::index::Index;
-use super::node::{NodePrefix, NODE_BYTES_LENGTH, NULL_NODE};
-use super::nodemap;
-use super::nodemap::{NodeMap, NodeMapError};
-use super::nodemap_docket::NodeMapDocket;
-use super::patch;
-use crate::errors::HgError;
-use crate::revlog::Revision;
-use crate::vfs::Vfs;
-use crate::{Node, NULL_REVISION};
-
-const REVISION_FLAG_CENSORED: u16 = 1 << 15;
-const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
-const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
-const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;
-
-// Keep this in sync with REVIDX_KNOWN_FLAGS in
-// mercurial/revlogutils/flagutil.py
-const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
-    | REVISION_FLAG_ELLIPSIS
-    | REVISION_FLAG_EXTSTORED
-    | REVISION_FLAG_HASCOPIESINFO;
-
-const NULL_REVLOG_ENTRY_FLAGS: u16 = 0;
-
-#[derive(Debug, derive_more::From)]
-pub enum RevlogError {
-    InvalidRevision,
-    /// Working directory is not supported
-    WDirUnsupported,
-    /// Found more than one entry whose ID match the requested prefix
-    AmbiguousPrefix,
-    #[from]
-    Other(HgError),
-}
-
-impl From<NodeMapError> for RevlogError {
-    fn from(error: NodeMapError) -> Self {
-        match error {
-            NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
-            NodeMapError::RevisionNotInIndex(rev) => RevlogError::corrupted(
-                format!("nodemap point to revision {} not in index", rev),
-            ),
-        }
-    }
-}
-
-fn corrupted<S: AsRef<str>>(context: S) -> HgError {
-    HgError::corrupted(format!("corrupted revlog, {}", context.as_ref()))
-}
-
-impl RevlogError {
-    fn corrupted<S: AsRef<str>>(context: S) -> Self {
-        RevlogError::Other(corrupted(context))
-    }
-}
-
-/// Read only implementation of revlog.
-pub struct Revlog {
-    /// When index and data are not interleaved: bytes of the revlog index.
-    /// When index and data are interleaved: bytes of the revlog index and
-    /// data.
-    index: Index,
-    /// When index and data are not interleaved: bytes of the revlog data
-    data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
-    /// When present on disk: the persistent nodemap for this revlog
-    nodemap: Option<nodemap::NodeTree>,
-}
-
-impl Revlog {
-    /// Open a revlog index file.
-    ///
-    /// It will also open the associated data file if index and data are not
-    /// interleaved.
-    pub fn open(
-        store_vfs: &Vfs,
-        index_path: impl AsRef<Path>,
-        data_path: Option<&Path>,
-        use_nodemap: bool,
-    ) -> Result<Self, HgError> {
-        let index_path = index_path.as_ref();
-        let index = {
-            match store_vfs.mmap_open_opt(&index_path)? {
-                None => Index::new(Box::new(vec![])),
-                Some(index_mmap) => {
-                    let index = Index::new(Box::new(index_mmap))?;
-                    Ok(index)
-                }
-            }
-        }?;
-
-        let default_data_path = index_path.with_extension("d");
-
-        // type annotation required
-        // won't recognize Mmap as Deref<Target = [u8]>
-        let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
-            if index.is_inline() {
-                None
-            } else {
-                let data_path = data_path.unwrap_or(&default_data_path);
-                let data_mmap = store_vfs.mmap_open(data_path)?;
-                Some(Box::new(data_mmap))
-            };
-
-        let nodemap = if index.is_inline() {
-            None
-        } else if !use_nodemap {
-            None
-        } else {
-            NodeMapDocket::read_from_file(store_vfs, index_path)?.map(
-                |(docket, data)| {
-                    nodemap::NodeTree::load_bytes(
-                        Box::new(data),
-                        docket.data_length,
-                    )
-                },
-            )
-        };
-
-        Ok(Revlog {
-            index,
-            data_bytes,
-            nodemap,
-        })
-    }
-
-    /// Return number of entries of the `Revlog`.
-    pub fn len(&self) -> usize {
-        self.index.len()
-    }
-
-    /// Returns `true` if the `Revlog` has zero `entries`.
-    pub fn is_empty(&self) -> bool {
-        self.index.is_empty()
-    }
-
-    /// Returns the node ID for the given revision number, if it exists in this
-    /// revlog
-    pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
-        if rev == NULL_REVISION {
-            return Some(&NULL_NODE);
-        }
-        Some(self.index.get_entry(rev)?.hash())
-    }
-
-    /// Return the revision number for the given node ID, if it exists in this
-    /// revlog
-    pub fn rev_from_node(
-        &self,
-        node: NodePrefix,
-    ) -> Result<Revision, RevlogError> {
-        if node.is_prefix_of(&NULL_NODE) {
-            return Ok(NULL_REVISION);
-        }
-
-        if let Some(nodemap) = &self.nodemap {
-            return nodemap
-                .find_bin(&self.index, node)?
-                .ok_or(RevlogError::InvalidRevision);
-        }
-
-        // Fallback to linear scan when a persistent nodemap is not present.
-        // This happens when the persistent-nodemap experimental feature is not
-        // enabled, or for small revlogs.
-        //
-        // TODO: consider building a non-persistent nodemap in memory to
-        // optimize these cases.
-        let mut found_by_prefix = None;
-        for rev in (0..self.len() as Revision).rev() {
-            let index_entry =
-                self.index.get_entry(rev).ok_or(HgError::corrupted(
-                    "revlog references a revision not in the index",
-                ))?;
-            if node == *index_entry.hash() {
-                return Ok(rev);
-            }
-            if node.is_prefix_of(index_entry.hash()) {
-                if found_by_prefix.is_some() {
-                    return Err(RevlogError::AmbiguousPrefix);
-                }
-                found_by_prefix = Some(rev)
-            }
-        }
-        found_by_prefix.ok_or(RevlogError::InvalidRevision)
-    }
-
-    /// Returns whether the given revision exists in this revlog.
-    pub fn has_rev(&self, rev: Revision) -> bool {
-        self.index.get_entry(rev).is_some()
-    }
-
-    /// Return the full data associated to a revision.
-    ///
-    /// All entries required to build the final data out of deltas will be
-    /// retrieved as needed, and the deltas will be applied to the inital
-    /// snapshot to rebuild the final data.
-    pub fn get_rev_data(
-        &self,
-        rev: Revision,
-    ) -> Result<Cow<[u8]>, RevlogError> {
-        if rev == NULL_REVISION {
-            return Ok(Cow::Borrowed(&[]));
-        };
-        Ok(self.get_entry(rev)?.data()?)
-    }
-
-    /// Check the hash of some given data against the recorded hash.
-    pub fn check_hash(
-        &self,
-        p1: Revision,
-        p2: Revision,
-        expected: &[u8],
-        data: &[u8],
-    ) -> bool {
-        let e1 = self.index.get_entry(p1);
-        let h1 = match e1 {
-            Some(ref entry) => entry.hash(),
-            None => &NULL_NODE,
-        };
-        let e2 = self.index.get_entry(p2);
-        let h2 = match e2 {
-            Some(ref entry) => entry.hash(),
-            None => &NULL_NODE,
-        };
-
-        &hash(data, h1.as_bytes(), h2.as_bytes()) == expected
-    }
-
-    /// Build the full data of a revision out its snapshot
-    /// and its deltas.
-    fn build_data_from_deltas(
-        snapshot: RevlogEntry,
-        deltas: &[RevlogEntry],
-    ) -> Result<Vec<u8>, HgError> {
-        let snapshot = snapshot.data_chunk()?;
-        let deltas = deltas
-            .iter()
-            .rev()
-            .map(RevlogEntry::data_chunk)
-            .collect::<Result<Vec<_>, _>>()?;
-        let patches: Vec<_> =
-            deltas.iter().map(|d| patch::PatchList::new(d)).collect();
-        let patch = patch::fold_patch_lists(&patches);
-        Ok(patch.apply(&snapshot))
-    }
-
-    /// Return the revlog data.
-    fn data(&self) -> &[u8] {
-        match self.data_bytes {
-            Some(ref data_bytes) => &data_bytes,
-            None => panic!(
-                "forgot to load the data or trying to access inline data"
-            ),
-        }
-    }
-
-    pub fn make_null_entry(&self) -> RevlogEntry {
-        RevlogEntry {
-            revlog: self,
-            rev: NULL_REVISION,
-            bytes: b"",
-            compressed_len: 0,
-            uncompressed_len: 0,
-            base_rev_or_base_of_delta_chain: None,
-            p1: NULL_REVISION,
-            p2: NULL_REVISION,
-            flags: NULL_REVLOG_ENTRY_FLAGS,
-            hash: NULL_NODE,
-        }
-    }
-
-    /// Get an entry of the revlog.
-    pub fn get_entry(
-        &self,
-        rev: Revision,
-    ) -> Result<RevlogEntry, RevlogError> {
-        if rev == NULL_REVISION {
-            return Ok(self.make_null_entry());
-        }
-        let index_entry = self
-            .index
-            .get_entry(rev)
-            .ok_or(RevlogError::InvalidRevision)?;
-        let start = index_entry.offset();
-        let end = start + index_entry.compressed_len() as usize;
-        let data = if self.index.is_inline() {
-            self.index.data(start, end)
-        } else {
-            &self.data()[start..end]
-        };
-        let entry = RevlogEntry {
-            revlog: self,
-            rev,
-            bytes: data,
-            compressed_len: index_entry.compressed_len(),
-            uncompressed_len: index_entry.uncompressed_len(),
-            base_rev_or_base_of_delta_chain: if index_entry
-                .base_revision_or_base_of_delta_chain()
-                == rev
-            {
-                None
-            } else {
-                Some(index_entry.base_revision_or_base_of_delta_chain())
-            },
-            p1: index_entry.p1(),
-            p2: index_entry.p2(),
-            flags: index_entry.flags(),
-            hash: *index_entry.hash(),
-        };
-        Ok(entry)
-    }
-
-    /// when resolving internal references within revlog, any errors
-    /// should be reported as corruption, instead of e.g. "invalid revision"
-    fn get_entry_internal(
-        &self,
-        rev: Revision,
-    ) -> Result<RevlogEntry, HgError> {
-        self.get_entry(rev)
-            .map_err(|_| corrupted(format!("revision {} out of range", rev)))
-    }
-}
-
-/// The revlog entry's bytes and the necessary informations to extract
-/// the entry's data.
-#[derive(Clone)]
-pub struct RevlogEntry<'a> {
-    revlog: &'a Revlog,
-    rev: Revision,
-    bytes: &'a [u8],
-    compressed_len: u32,
-    uncompressed_len: i32,
-    base_rev_or_base_of_delta_chain: Option<Revision>,
-    p1: Revision,
-    p2: Revision,
-    flags: u16,
-    hash: Node,
-}
-
-impl<'a> RevlogEntry<'a> {
-    pub fn revision(&self) -> Revision {
-        self.rev
-    }
-
-    pub fn node(&self) -> &Node {
-        &self.hash
-    }
-
-    pub fn uncompressed_len(&self) -> Option<u32> {
-        u32::try_from(self.uncompressed_len).ok()
-    }
-
-    pub fn has_p1(&self) -> bool {
-        self.p1 != NULL_REVISION
-    }
-
-    pub fn p1_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
-        if self.p1 == NULL_REVISION {
-            Ok(None)
-        } else {
-            Ok(Some(self.revlog.get_entry(self.p1)?))
-        }
-    }
-
-    pub fn p2_entry(&self) -> Result<Option<RevlogEntry>, RevlogError> {
-        if self.p2 == NULL_REVISION {
-            Ok(None)
-        } else {
-            Ok(Some(self.revlog.get_entry(self.p2)?))
-        }
-    }
-
-    pub fn p1(&self) -> Option<Revision> {
-        if self.p1 == NULL_REVISION {
-            None
-        } else {
-            Some(self.p1)
-        }
-    }
-
-    pub fn p2(&self) -> Option<Revision> {
-        if self.p2 == NULL_REVISION {
-            None
-        } else {
-            Some(self.p2)
-        }
-    }
-
-    pub fn is_censored(&self) -> bool {
-        (self.flags & REVISION_FLAG_CENSORED) != 0
-    }
-
-    pub fn has_length_affecting_flag_processor(&self) -> bool {
-        // Relevant Python code: revlog.size()
-        // note: ELLIPSIS is known to not change the content
-        (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
-    }
-
-    /// The data for this entry, after resolving deltas if any.
-    pub fn rawdata(&self) -> Result<Cow<'a, [u8]>, HgError> {
-        let mut entry = self.clone();
-        let mut delta_chain = vec![];
-
-        // The meaning of `base_rev_or_base_of_delta_chain` depends on
-        // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
-        // `mercurial/revlogutils/constants.py` and the code in
-        // [_chaininfo] and in [index_deltachain].
-        let uses_generaldelta = self.revlog.index.uses_generaldelta();
-        while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
-            let base_rev = if uses_generaldelta {
-                base_rev
-            } else {
-                entry.rev - 1
-            };
-            delta_chain.push(entry);
-            entry = self.revlog.get_entry_internal(base_rev)?;
-        }
-
-        let data = if delta_chain.is_empty() {
-            entry.data_chunk()?
-        } else {
-            Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
-        };
-
-        Ok(data)
-    }
-
-    fn check_data(
-        &self,
-        data: Cow<'a, [u8]>,
-    ) -> Result<Cow<'a, [u8]>, HgError> {
-        if self.revlog.check_hash(
-            self.p1,
-            self.p2,
-            self.hash.as_bytes(),
-            &data,
-        ) {
-            Ok(data)
-        } else {
-            if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
-                return Err(HgError::unsupported(
-                    "ellipsis revisions are not supported by rhg",
-                ));
-            }
-            Err(corrupted(format!(
-                "hash check failed for revision {}",
-                self.rev
-            )))
-        }
-    }
-
-    pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
-        let data = self.rawdata()?;
-        if self.is_censored() {
-            return Err(HgError::CensoredNodeError);
-        }
-        self.check_data(data)
-    }
-
-    /// Extract the data contained in the entry.
-    /// This may be a delta. (See `is_delta`.)
-    fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
-        if self.bytes.is_empty() {
-            return Ok(Cow::Borrowed(&[]));
-        }
-        match self.bytes[0] {
-            // Revision data is the entirety of the entry, including this
-            // header.
-            b'\0' => Ok(Cow::Borrowed(self.bytes)),
-            // Raw revision data follows.
-            b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
-            // zlib (RFC 1950) data.
-            b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
-            // zstd data.
-            b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
-            // A proper new format should have had a repo/store requirement.
-            format_type => Err(corrupted(format!(
-                "unknown compression header '{}'",
-                format_type
-            ))),
-        }
-    }
-
-    fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
-        let mut decoder = ZlibDecoder::new(self.bytes);
-        if self.is_delta() {
-            let mut buf = Vec::with_capacity(self.compressed_len as usize);
-            decoder
-                .read_to_end(&mut buf)
-                .map_err(|e| corrupted(e.to_string()))?;
-            Ok(buf)
-        } else {
-            let cap = self.uncompressed_len.max(0) as usize;
-            let mut buf = vec![0; cap];
-            decoder
-                .read_exact(&mut buf)
-                .map_err(|e| corrupted(e.to_string()))?;
-            Ok(buf)
-        }
-    }
-
-    fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
-        if self.is_delta() {
-            let mut buf = Vec::with_capacity(self.compressed_len as usize);
-            zstd::stream::copy_decode(self.bytes, &mut buf)
-                .map_err(|e| corrupted(e.to_string()))?;
-            Ok(buf)
-        } else {
-            let cap = self.uncompressed_len.max(0) as usize;
-            let mut buf = vec![0; cap];
-            let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
-                .map_err(|e| corrupted(e.to_string()))?;
-            if len != self.uncompressed_len as usize {
-                Err(corrupted("uncompressed length does not match"))
-            } else {
-                Ok(buf)
-            }
-        }
-    }
-
-    /// Tell if the entry is a snapshot or a delta
-    /// (influences on decompression).
-    fn is_delta(&self) -> bool {
-        self.base_rev_or_base_of_delta_chain.is_some()
-    }
-}
-
-/// Calculate the hash of a revision given its data and its parents.
-fn hash(
-    data: &[u8],
-    p1_hash: &[u8],
-    p2_hash: &[u8],
-) -> [u8; NODE_BYTES_LENGTH] {
-    let mut hasher = Sha1::new();
-    let (a, b) = (p1_hash, p2_hash);
-    if a > b {
-        hasher.update(b);
-        hasher.update(a);
-    } else {
-        hasher.update(a);
-        hasher.update(b);
-    }
-    hasher.update(data);
-    *hasher.finalize().as_ref()
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
-    use itertools::Itertools;
-
-    #[test]
-    fn test_empty() {
-        let temp = tempfile::tempdir().unwrap();
-        let vfs = Vfs { base: temp.path() };
-        std::fs::write(temp.path().join("foo.i"), b"").unwrap();
-        let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
-        assert!(revlog.is_empty());
-        assert_eq!(revlog.len(), 0);
-        assert!(revlog.get_entry(0).is_err());
-        assert!(!revlog.has_rev(0));
-    }
-
-    #[test]
-    fn test_inline() {
-        let temp = tempfile::tempdir().unwrap();
-        let vfs = Vfs { base: temp.path() };
-        let node0 = Node::from_hex("2ed2a3912a0b24502043eae84ee4b279c18b90dd")
-            .unwrap();
-        let node1 = Node::from_hex("b004912a8510032a0350a74daa2803dadfb00e12")
-            .unwrap();
-        let node2 = Node::from_hex("dd6ad206e907be60927b5a3117b97dffb2590582")
-            .unwrap();
-        let entry0_bytes = IndexEntryBuilder::new()
-            .is_first(true)
-            .with_version(1)
-            .with_inline(true)
-            .with_offset(INDEX_ENTRY_SIZE)
-            .with_node(node0)
-            .build();
-        let entry1_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
-            .with_node(node1)
-            .build();
-        let entry2_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
-            .with_p1(0)
-            .with_p2(1)
-            .with_node(node2)
-            .build();
-        let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
-            .into_iter()
-            .flatten()
-            .collect_vec();
-        std::fs::write(temp.path().join("foo.i"), contents).unwrap();
-        let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
-
-        let entry0 = revlog.get_entry(0).ok().unwrap();
-        assert_eq!(entry0.revision(), 0);
-        assert_eq!(*entry0.node(), node0);
-        assert!(!entry0.has_p1());
-        assert_eq!(entry0.p1(), None);
-        assert_eq!(entry0.p2(), None);
-        let p1_entry = entry0.p1_entry().unwrap();
-        assert!(p1_entry.is_none());
-        let p2_entry = entry0.p2_entry().unwrap();
-        assert!(p2_entry.is_none());
-
-        let entry1 = revlog.get_entry(1).ok().unwrap();
-        assert_eq!(entry1.revision(), 1);
-        assert_eq!(*entry1.node(), node1);
-        assert!(!entry1.has_p1());
-        assert_eq!(entry1.p1(), None);
-        assert_eq!(entry1.p2(), None);
-        let p1_entry = entry1.p1_entry().unwrap();
-        assert!(p1_entry.is_none());
-        let p2_entry = entry1.p2_entry().unwrap();
-        assert!(p2_entry.is_none());
-
-        let entry2 = revlog.get_entry(2).ok().unwrap();
-        assert_eq!(entry2.revision(), 2);
-        assert_eq!(*entry2.node(), node2);
-        assert!(entry2.has_p1());
-        assert_eq!(entry2.p1(), Some(0));
-        assert_eq!(entry2.p2(), Some(1));
-        let p1_entry = entry2.p1_entry().unwrap();
-        assert!(p1_entry.is_some());
-        assert_eq!(p1_entry.unwrap().revision(), 0);
-        let p2_entry = entry2.p2_entry().unwrap();
-        assert!(p2_entry.is_some());
-        assert_eq!(p2_entry.unwrap().revision(), 1);
-    }
-}
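
A note on the `hash()` helper removed above: it implements Mercurial's node
hash, feeding the two parent hashes to SHA-1 in sorted order followed by the
revision data, so the result does not depend on parent order. A minimal
standalone sketch of that scheme, assuming the `sha1` crate's 0.10-style
`Digest` API; `node_hash` is an illustrative name, not the Mercurial API:

    use sha1::{Digest, Sha1};

    fn node_hash(data: &[u8], p1: &[u8; 20], p2: &[u8; 20]) -> [u8; 20] {
        let mut hasher = Sha1::new();
        // Feed the smaller parent hash first so that (p1, p2) and (p2, p1)
        // produce the same node.
        let (lo, hi) = if p1 > p2 { (p2, p1) } else { (p1, p2) };
        hasher.update(lo);
        hasher.update(hi);
        hasher.update(data);
        hasher.finalize().into()
    }

    fn main() {
        let (p1, p2) = ([1u8; 20], [2u8; 20]);
        assert_eq!(node_hash(b"data", &p1, &p2), node_hash(b"data", &p2, &p1));
    }
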
--- a/rust/hg-core/src/revset.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/revset.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -4,9 +4,9 @@
 
 use crate::errors::HgError;
 use crate::repo::Repo;
-use crate::revlog::revlog::{Revlog, RevlogError};
 use crate::revlog::NodePrefix;
 use crate::revlog::{Revision, NULL_REVISION, WORKING_DIRECTORY_HEX};
+use crate::revlog::{Revlog, RevlogError};
 use crate::Node;
 
 /// Resolve a query string into a single revision.
@@ -21,7 +21,7 @@
     match input {
         "." => {
             let p1 = repo.dirstate_parents()?.p1;
-            return Ok(changelog.revlog.rev_from_node(p1.into())?);
+            return changelog.revlog.rev_from_node(p1.into());
         }
         "null" => return Ok(NULL_REVISION),
         _ => {}
@@ -33,7 +33,7 @@
             let msg = format!("cannot parse revset '{}'", input);
             Err(HgError::unsupported(msg).into())
         }
-        result => return result,
+        result => result,
     }
 }
 
--- a/rust/hg-core/src/sparse.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/sparse.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -164,7 +164,7 @@
 fn read_temporary_includes(
     repo: &Repo,
 ) -> Result<Vec<Vec<u8>>, SparseConfigError> {
-    let raw = repo.hg_vfs().try_read("tempsparse")?.unwrap_or(vec![]);
+    let raw = repo.hg_vfs().try_read("tempsparse")?.unwrap_or_default();
     if raw.is_empty() {
         return Ok(vec![]);
     }
@@ -179,7 +179,7 @@
     if !repo.has_sparse() {
         return Ok(None);
     }
-    let raw = repo.hg_vfs().try_read("sparse")?.unwrap_or(vec![]);
+    let raw = repo.hg_vfs().try_read("sparse")?.unwrap_or_default();
 
     if raw.is_empty() {
         return Ok(None);
@@ -200,9 +200,10 @@
             let output =
                 cat(repo, &rev.to_string(), vec![HgPath::new(&profile)])
                     .map_err(|_| {
-                        HgError::corrupted(format!(
+                        HgError::corrupted(
                             "dirstate points to non-existent parent node"
-                        ))
+                                .to_string(),
+                        )
                     })?;
             if output.results.is_empty() {
                 config.warnings.push(SparseWarning::ProfileNotFound {
@@ -252,9 +253,9 @@
         repo.changelog()?
             .rev_from_node(parents.p1.into())
             .map_err(|_| {
-                HgError::corrupted(format!(
-                    "dirstate points to non-existent parent node"
-                ))
+                HgError::corrupted(
+                    "dirstate points to non-existent parent node".to_string(),
+                )
             })?;
     if p1_rev != NULL_REVISION {
         revs.push(p1_rev)
@@ -263,9 +264,9 @@
         repo.changelog()?
             .rev_from_node(parents.p2.into())
             .map_err(|_| {
-                HgError::corrupted(format!(
-                    "dirstate points to non-existent parent node"
-                ))
+                HgError::corrupted(
+                    "dirstate points to non-existent parent node".to_string(),
+                )
             })?;
     if p2_rev != NULL_REVISION {
         revs.push(p2_rev)
@@ -325,7 +326,7 @@
     }
     let forced_include_matcher = IncludeMatcher::new(
         temp_includes
-            .into_iter()
+            .iter()
             .map(|include| {
                 IgnorePattern::new(PatternSyntax::Path, include, Path::new(""))
             })
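
The sparse.rs hunks above also swap `unwrap_or(vec![])` for
`unwrap_or_default()`, the form clippy's `unwrap_or_default` lint suggests:
the argument of `unwrap_or` is evaluated eagerly on every call (cheap for an
empty vector, but the `_default` form is the idiomatic spelling and defers
construction to the `None` case). A tiny illustrative sketch:

    fn main() {
        let missing: Option<Vec<u8>> = None;
        // Equivalent to `missing.unwrap_or(vec![])`, without building the
        // empty vector up front.
        let raw = missing.unwrap_or_default();
        assert!(raw.is_empty());
    }
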
--- a/rust/hg-core/src/utils.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/utils.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -138,26 +138,8 @@
     }
 
     fn split_2_by_slice(&self, separator: &[u8]) -> Option<(&[u8], &[u8])> {
-        if let Some(pos) = find_slice_in_slice(self, separator) {
-            Some((&self[..pos], &self[pos + separator.len()..]))
-        } else {
-            None
-        }
-    }
-}
-
-pub trait StrExt {
-    // TODO: Use https://doc.rust-lang.org/nightly/std/primitive.str.html#method.split_once
-    // once we require Rust 1.52+
-    fn split_2(&self, separator: char) -> Option<(&str, &str)>;
-}
-
-impl StrExt for str {
-    fn split_2(&self, separator: char) -> Option<(&str, &str)> {
-        let mut iter = self.splitn(2, separator);
-        let a = iter.next()?;
-        let b = iter.next()?;
-        Some((a, b))
+        find_slice_in_slice(self, separator)
+            .map(|pos| (&self[..pos], &self[pos + separator.len()..]))
     }
 }
 
@@ -212,28 +194,20 @@
     }
 }
 
-// TODO: use the str method when we require Rust 1.45
-pub(crate) fn strip_suffix<'a>(s: &'a str, suffix: &str) -> Option<&'a str> {
-    if s.ends_with(suffix) {
-        Some(&s[..s.len() - suffix.len()])
-    } else {
-        None
-    }
-}
-
 #[cfg(unix)]
 pub fn shell_quote(value: &[u8]) -> Vec<u8> {
-    // TODO: Use the `matches!` macro when we require Rust 1.42+
-    if value.iter().all(|&byte| match byte {
-        b'a'..=b'z'
-        | b'A'..=b'Z'
-        | b'0'..=b'9'
-        | b'.'
-        | b'_'
-        | b'/'
-        | b'+'
-        | b'-' => true,
-        _ => false,
+    if value.iter().all(|&byte| {
+        matches!(
+            byte,
+            b'a'..=b'z'
+            | b'A'..=b'Z'
+            | b'0'..=b'9'
+            | b'.'
+            | b'_'
+            | b'/'
+            | b'+'
+            | b'-'
+        )
     }) {
         value.to_owned()
     } else {
@@ -318,9 +292,9 @@
 }
 
 pub(crate) enum MergeResult<V> {
-    UseLeftValue,
-    UseRightValue,
-    UseNewValue(V),
+    Left,
+    Right,
+    New(V),
 }
 
 /// Return the union of the two given maps,
@@ -361,10 +335,10 @@
         ordmap_union_with_merge_by_iter(right, left, |key, a, b| {
             // Also swapped in `merge` arguments:
             match merge(key, b, a) {
-                MergeResult::UseNewValue(v) => MergeResult::UseNewValue(v),
+                MergeResult::New(v) => MergeResult::New(v),
                 // … and swap back in `merge` result:
-                MergeResult::UseLeftValue => MergeResult::UseRightValue,
-                MergeResult::UseRightValue => MergeResult::UseLeftValue,
+                MergeResult::Left => MergeResult::Right,
+                MergeResult::Right => MergeResult::Left,
             }
         })
     } else {
@@ -389,11 +363,11 @@
                 left.insert(key, right_value);
             }
             Some(left_value) => match merge(&key, left_value, &right_value) {
-                MergeResult::UseLeftValue => {}
-                MergeResult::UseRightValue => {
+                MergeResult::Left => {}
+                MergeResult::Right => {
                     left.insert(key, right_value);
                 }
-                MergeResult::UseNewValue(new_value) => {
+                MergeResult::New(new_value) => {
                     left.insert(key, new_value);
                 }
             },
@@ -418,7 +392,7 @@
     // TODO: if/when https://github.com/bodil/im-rs/pull/168 is accepted,
     // change these from `Vec<(K, V)>` to `Vec<(&K, Cow<V>)>`
     // with `left_updates` only borrowing from `right` and `right_updates` from
-    // `left`, and with `Cow::Owned` used for `MergeResult::UseNewValue`.
+    // `left`, and with `Cow::Owned` used for `MergeResult::New`.
     //
     // This would allow moving all `.clone()` calls to after we’ve decided
     // which of `right_updates` or `left_updates` to use
@@ -439,13 +413,13 @@
                 old: (key, left_value),
                 new: (_, right_value),
             } => match merge(key, left_value, right_value) {
-                MergeResult::UseLeftValue => {
+                MergeResult::Left => {
                     right_updates.push((key.clone(), left_value.clone()))
                 }
-                MergeResult::UseRightValue => {
+                MergeResult::Right => {
                     left_updates.push((key.clone(), right_value.clone()))
                 }
-                MergeResult::UseNewValue(new_value) => {
+                MergeResult::New(new_value) => {
                     left_updates.push((key.clone(), new_value.clone()));
                     right_updates.push((key.clone(), new_value))
                 }
@@ -504,3 +478,23 @@
         Ok(())
     }
 }
+
+/// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
+///
+/// The callback is only called for incoming `Ok` values. Errors are passed
+/// through as-is. In order to let the callback use the `?` operator, it is
+/// expected to return a `Result` of `Option` rather than an `Option` of
+/// `Result`.
+pub fn filter_map_results<'a, I, F, A, B, E>(
+    iter: I,
+    f: F,
+) -> impl Iterator<Item = Result<B, E>> + 'a
+where
+    I: Iterator<Item = Result<A, E>> + 'a,
+    F: Fn(A) -> Result<Option<B>, E> + 'a,
+{
+    iter.filter_map(move |result| match result {
+        Ok(node) => f(node).transpose(),
+        Err(e) => Some(Err(e)),
+    })
+}
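
The new `filter_map_results` helper above only invokes the callback on `Ok`
items and forwards errors untouched; `transpose()` turns the callback's
`Result<Option<B>, E>` back into the `Option<Result<B, E>>` that
`filter_map` expects. A self-contained usage sketch (the helper body is
copied from the hunk above; the data and closure are illustrative):

    fn filter_map_results<'a, I, F, A, B, E>(
        iter: I,
        f: F,
    ) -> impl Iterator<Item = Result<B, E>> + 'a
    where
        I: Iterator<Item = Result<A, E>> + 'a,
        F: Fn(A) -> Result<Option<B>, E> + 'a,
    {
        iter.filter_map(move |result| match result {
            Ok(node) => f(node).transpose(),
            Err(e) => Some(Err(e)),
        })
    }

    fn main() {
        let input = vec![Ok(2), Ok(3), Ok(4), Err("read failed")];
        // Keep even numbers (scaled by 10), drop odd ones, forward errors.
        let out: Vec<Result<i32, &str>> =
            filter_map_results(input.into_iter(), |n| {
                Ok(if n % 2 == 0 { Some(n * 10) } else { None })
            })
            .collect();
        assert_eq!(out, vec![Ok(20), Ok(40), Err("read failed")]);
    }
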
--- a/rust/hg-core/src/utils/debug.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/utils/debug.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -81,7 +81,7 @@
 }
 
 pub fn debug_wait_for_file_or_print(config: &Config, config_option: &str) {
-    if let Err(e) = debug_wait_for_file(&config, config_option) {
+    if let Err(e) = debug_wait_for_file(config, config_option) {
         eprintln!("{e}");
     };
 }
--- a/rust/hg-core/src/utils/files.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/utils/files.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -230,7 +230,7 @@
         // TODO hint to the user about using --cwd
         // Bubble up the responsibility to Python for now
         Err(HgPathError::NotUnderRoot {
-            path: original_name.to_owned(),
+            path: original_name,
             root: root.to_owned(),
         })
     }
@@ -424,7 +424,7 @@
         assert_eq!(
             canonical_path(&root, Path::new(""), &beneath_repo),
             Err(HgPathError::NotUnderRoot {
-                path: beneath_repo.to_owned(),
+                path: beneath_repo,
                 root: root.to_owned()
             })
         );
--- a/rust/hg-core/src/utils/hg_path.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/utils/hg_path.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -8,7 +8,6 @@
 use crate::utils::SliceExt;
 use std::borrow::Borrow;
 use std::borrow::Cow;
-use std::convert::TryFrom;
 use std::ffi::{OsStr, OsString};
 use std::fmt;
 use std::ops::Deref;
@@ -206,7 +205,7 @@
     /// ```
     pub fn split_filename(&self) -> (&Self, &Self) {
         match &self.inner.iter().rposition(|c| *c == b'/') {
-            None => (HgPath::new(""), &self),
+            None => (HgPath::new(""), self),
             Some(size) => (
                 HgPath::new(&self.inner[..*size]),
                 HgPath::new(&self.inner[*size + 1..]),
@@ -327,7 +326,7 @@
     #[cfg(unix)]
     /// Split a pathname into drive and path. On Posix, drive is always empty.
     pub fn split_drive(&self) -> (&HgPath, &HgPath) {
-        (HgPath::new(b""), &self)
+        (HgPath::new(b""), self)
     }
 
     /// Checks for errors in the path, short-circuiting at the first one.
@@ -397,7 +396,7 @@
         Default::default()
     }
 
-    pub fn push<T: ?Sized + AsRef<HgPath>>(&mut self, other: &T) -> () {
+    pub fn push<T: ?Sized + AsRef<HgPath>>(&mut self, other: &T) {
         if !self.inner.is_empty() && self.inner.last() != Some(&b'/') {
             self.inner.push(b'/');
         }
@@ -432,7 +431,7 @@
 
     #[inline]
     fn deref(&self) -> &HgPath {
-        &HgPath::new(&self.inner)
+        HgPath::new(&self.inner)
     }
 }
 
@@ -442,15 +441,15 @@
     }
 }
 
-impl Into<Vec<u8>> for HgPathBuf {
-    fn into(self) -> Vec<u8> {
-        self.inner
+impl From<HgPathBuf> for Vec<u8> {
+    fn from(val: HgPathBuf) -> Self {
+        val.inner
     }
 }
 
 impl Borrow<HgPath> for HgPathBuf {
     fn borrow(&self) -> &HgPath {
-        &HgPath::new(self.as_bytes())
+        HgPath::new(self.as_bytes())
     }
 }
 
@@ -492,7 +491,7 @@
     #[cfg(unix)]
     {
         use std::os::unix::ffi::OsStrExt;
-        os_str = std::ffi::OsStr::from_bytes(&hg_path.as_ref().as_bytes());
+        os_str = std::ffi::OsStr::from_bytes(hg_path.as_ref().as_bytes());
     }
     // TODO Handle other platforms
     // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
@@ -512,7 +511,7 @@
     #[cfg(unix)]
     {
         use std::os::unix::ffi::OsStrExt;
-        buf = HgPathBuf::from_bytes(&os_string.as_ref().as_bytes());
+        buf = HgPathBuf::from_bytes(os_string.as_ref().as_bytes());
     }
     // TODO Handle other platforms
     // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
@@ -529,7 +528,7 @@
     #[cfg(unix)]
     {
         use std::os::unix::ffi::OsStrExt;
-        buf = HgPathBuf::from_bytes(&os_str.as_bytes());
+        buf = HgPathBuf::from_bytes(os_str.as_bytes());
     }
     // TODO Handle other platforms
     // TODO: convert from WTF8 to Windows MBCS (ANSI encoding).
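
The `HgPathBuf` change above follows clippy's `from_over_into` lint:
implementing `From` on the target type provides the matching `Into` for free
through the blanket `impl<T, U: From<T>> Into<U> for T`, so callers lose
nothing. A toy sketch of the same pattern (`Wrapper` is illustrative, not
from the source):

    struct Wrapper {
        inner: Vec<u8>,
    }

    impl From<Wrapper> for Vec<u8> {
        fn from(val: Wrapper) -> Self {
            val.inner
        }
    }

    fn main() {
        let w = Wrapper { inner: b"abc".to_vec() };
        // Provided automatically by the blanket impl over `From`.
        let v: Vec<u8> = w.into();
        assert_eq!(v, b"abc");
    }
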
--- a/rust/hg-core/src/vfs.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/src/vfs.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -48,7 +48,7 @@
         match self.read(relative_path) {
             Err(e) => match &e {
                 HgError::IoError { error, .. } => match error.kind() {
-                    ErrorKind::NotFound => return Ok(None),
+                    ErrorKind::NotFound => Ok(None),
                     _ => Err(e),
                 },
                 _ => Err(e),
--- a/rust/hg-core/tests/test_missing_ancestors.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-core/tests/test_missing_ancestors.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -38,7 +38,7 @@
             // p2 is a random revision lower than i and different from p1
             let mut p2 = rng.gen_range(0..i - 1) as Revision;
             if p2 >= p1 {
-                p2 = p2 + 1;
+                p2 += 1;
             }
             vg.push([p1, p2]);
         } else if rng.gen_bool(prevprob) {
@@ -53,7 +53,7 @@
 /// Compute the ancestors set of all revisions of a VecGraph
 fn ancestors_sets(vg: &VecGraph) -> Vec<HashSet<Revision>> {
     let mut ancs: Vec<HashSet<Revision>> = Vec::new();
-    for i in 0..vg.len() {
+    (0..vg.len()).for_each(|i| {
         let mut ancs_i = HashSet::new();
         ancs_i.insert(i as Revision);
         for p in vg[i].iter().cloned() {
@@ -62,7 +62,7 @@
             }
         }
         ancs.push(ancs_i);
-    }
+    });
     ancs
 }
 
@@ -95,9 +95,9 @@
         random_seed: &str,
     ) -> Self {
         Self {
-            ancestors_sets: ancestors_sets,
+            ancestors_sets,
             bases: bases.clone(),
-            graph: graph,
+            graph,
             history: vec![MissingAncestorsAction::InitialBases(bases.clone())],
             random_seed: random_seed.into(),
         }
@@ -116,7 +116,7 @@
         for base in self.bases.iter().cloned() {
             if base != NULL_REVISION {
                 for rev in &self.ancestors_sets[base as usize] {
-                    revs.remove(&rev);
+                    revs.remove(rev);
                 }
             }
         }
@@ -140,12 +140,12 @@
         for base in self.bases.iter().cloned() {
             if base != NULL_REVISION {
                 for rev in &self.ancestors_sets[base as usize] {
-                    missing.remove(&rev);
+                    missing.remove(rev);
                 }
             }
         }
         let mut res: Vec<Revision> = missing.iter().cloned().collect();
-        res.sort();
+        res.sort_unstable();
         res
     }
 
@@ -196,7 +196,7 @@
     let nb = min(maxrev as usize, log_normal.sample(rng).floor() as usize);
 
     let dist = Uniform::from(NULL_REVISION..maxrev);
-    return rng.sample_iter(&dist).take(nb).collect();
+    rng.sample_iter(&dist).take(nb).collect()
 }
 
 /// Produces the hexadecimal representation of a slice of bytes
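
The `res.sort()` to `res.sort_unstable()` change above is the usual clippy
`stable_sort_primitive` cleanup: equal `Revision` values are
indistinguishable, so the stability guarantee (and the temporary allocation a
stable sort may make) buys nothing. A tiny sketch:

    fn main() {
        let mut revs: Vec<i32> = vec![4, 1, 3, 1, 2];
        // In-place and allocation-free; fine because equal elements need
        // not keep their relative order.
        revs.sort_unstable();
        assert_eq!(revs, vec![1, 1, 2, 3, 4]);
    }
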
--- a/rust/hg-cpython/Cargo.toml	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/Cargo.toml	Thu Mar 02 22:45:44 2023 +0100
@@ -2,18 +2,18 @@
 name = "hg-cpython"
 version = "0.1.0"
 authors = ["Georges Racinet <gracinet@anybox.fr>"]
-edition = "2018"
+edition = "2021"
 
 [lib]
 name='rusthg'
 crate-type = ["cdylib"]
 
 [dependencies]
-cpython = { version = "0.7.0", features = ["extension-module"] }
-crossbeam-channel = "0.5.2"
+cpython = { version = "0.7.1", features = ["extension-module"] }
+crossbeam-channel = "0.5.6"
 hg-core = { path = "../hg-core"}
-libc = "0.2.119"
-log = "0.4.14"
-env_logger = "0.9.0"
+libc = "0.2.137"
+log = "0.4.17"
+env_logger = "0.9.3"
 stable_deref_trait = "1.2.0"
 vcsgraph = "0.2.0"
--- a/rust/hg-cpython/src/conversion.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/conversion.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -10,7 +10,6 @@
 
 use cpython::{ObjectProtocol, PyObject, PyResult, Python};
 use hg::Revision;
-use std::iter::FromIterator;
 
 /// Utility function to convert a Python iterable into various collections
 ///
--- a/rust/hg-cpython/src/copy_tracing.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/copy_tracing.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -103,7 +103,7 @@
                 // thread can drop it. Otherwise the GIL would be implicitly
                 // acquired here through `impl Drop for PyBytes`.
                 if let Some(bytes) = opt_bytes {
-                    if let Err(_) = pybytes_sender.send(bytes.unwrap()) {
+                    if pybytes_sender.send(bytes.unwrap()).is_err() {
                         // The channel is disconnected, meaning the parent
                         // thread panicked or returned
                         // early through
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -98,7 +98,7 @@
 
     def __contains__(&self, item: PyObject) -> PyResult<bool> {
         Ok(self.inner(py).borrow().contains(HgPath::new(
-            item.extract::<PyBytes>(py)?.data(py).as_ref(),
+            item.extract::<PyBytes>(py)?.data(py),
         )))
     }
 });
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -9,7 +9,6 @@
 //! `hg-core` package.
 
 use std::cell::{RefCell, RefMut};
-use std::convert::TryInto;
 
 use cpython::{
     exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
@@ -122,9 +121,7 @@
         let bytes = f.extract::<PyBytes>(py)?;
         let path = HgPath::new(bytes.data(py));
         let res = self.inner(py).borrow_mut().set_tracked(path);
-        let was_tracked = res.or_else(|_| {
-            Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
-        })?;
+        let was_tracked = res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
         Ok(was_tracked.to_py_object(py))
     }
 
@@ -132,9 +129,7 @@
         let bytes = f.extract::<PyBytes>(py)?;
         let path = HgPath::new(bytes.data(py));
         let res = self.inner(py).borrow_mut().set_untracked(path);
-        let was_tracked = res.or_else(|_| {
-            Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
-        })?;
+        let was_tracked = res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
         Ok(was_tracked.to_py_object(py))
     }
 
@@ -154,9 +149,7 @@
         let res = self.inner(py).borrow_mut().set_clean(
             path, mode, size, timestamp,
         );
-        res.or_else(|_| {
-            Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
-        })?;
+        res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
         Ok(PyNone)
     }
 
@@ -164,9 +157,7 @@
         let bytes = f.extract::<PyBytes>(py)?;
         let path = HgPath::new(bytes.data(py));
         let res = self.inner(py).borrow_mut().set_possibly_dirty(path);
-        res.or_else(|_| {
-            Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
-        })?;
+        res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
         Ok(PyNone)
     }
 
@@ -213,9 +204,7 @@
             has_meaningful_mtime,
             parent_file_data,
         );
-        res.or_else(|_| {
-            Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
-        })?;
+        res.map_err(|_| PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))?;
         Ok(PyNone)
     }
 
--- a/rust/hg-cpython/src/dirstate/item.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/dirstate/item.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -40,7 +40,7 @@
             }
         }
         let entry = DirstateEntry::from_v2_data(DirstateV2Data {
-            wc_tracked: wc_tracked,
+            wc_tracked,
             p1_tracked,
             p2_info,
             mode_size: mode_size_opt,
@@ -151,6 +151,10 @@
         Ok(self.entry(py).get().added())
     }
 
+    @property
+    def modified(&self) -> PyResult<bool> {
+        Ok(self.entry(py).get().modified())
+    }
 
     @property
     def p2_info(&self) -> PyResult<bool> {
--- a/rust/hg-cpython/src/dirstate/status.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/dirstate/status.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -72,12 +72,11 @@
     for (path, bad_match) in collection.iter() {
         let message = match bad_match {
             BadMatch::OsError(code) => get_error_message(*code)?,
-            BadMatch::BadType(bad_type) => format!(
-                "unsupported file type (type is {})",
-                bad_type.to_string()
-            )
-            .to_py_object(py)
-            .into_object(),
+            BadMatch::BadType(bad_type) => {
+                format!("unsupported file type (type is {})", bad_type)
+                    .to_py_object(py)
+                    .into_object()
+            }
         };
         list.append(
             py,
--- a/rust/hg-cpython/src/lib.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/lib.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -18,6 +18,11 @@
 //! >>> ancestor.__doc__
 //! 'Generic DAG ancestor algorithms - Rust implementation'
 //! ```
+#![allow(clippy::too_many_arguments)] // rust-cpython macros
+#![allow(clippy::zero_ptr)] // rust-cpython macros
+#![allow(clippy::needless_update)] // rust-cpython macros
+#![allow(clippy::manual_strip)] // rust-cpython macros
+#![allow(clippy::type_complexity)] // rust-cpython macros
 
 /// This crate uses nested private macros, `extern crate` is still needed in
 /// 2018 edition.
--- a/rust/hg-cpython/src/pybytes_deref.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/pybytes_deref.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -47,6 +47,7 @@
 
 #[allow(unused)]
 fn static_assert_pybytes_is_send() {
+    #[allow(clippy::no_effect)]
     require_send::<PyBytes>;
 }
 
--- a/rust/hg-cpython/src/revlog.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/revlog.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -144,9 +144,9 @@
         // __delitem__ is both for `del idx[r]` and `del idx[r1:r2]`
         self.cindex(py).borrow().inner().del_item(py, key)?;
         let mut opt = self.get_nodetree(py)?.borrow_mut();
-        let mut nt = opt.as_mut().unwrap();
+        let nt = opt.as_mut().unwrap();
         nt.invalidate_all();
-        self.fill_nodemap(py, &mut nt)?;
+        self.fill_nodemap(py, nt)?;
         Ok(())
     }
 
--- a/rust/hg-cpython/src/utils.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/hg-cpython/src/utils.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,7 +1,6 @@
 use cpython::exc::ValueError;
 use cpython::{PyBytes, PyDict, PyErr, PyObject, PyResult, PyTuple, Python};
 use hg::revlog::Node;
-use std::convert::TryFrom;
 
 #[allow(unused)]
 pub fn print_python_trace(py: Python) -> PyResult<PyObject> {
--- a/rust/rhg/Cargo.toml	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/Cargo.toml	Thu Mar 02 22:45:44 2023 +0100
@@ -5,21 +5,21 @@
     "Antoine Cezar <antoine.cezar@octobus.net>",
     "Raphaël Gomès <raphael.gomes@octobus.net>",
 ]
-edition = "2018"
+edition = "2021"
 
 [dependencies]
 atty = "0.2.14"
 hg-core = { path = "../hg-core"}
-chrono = "0.4.19"
-clap = "2.34.0"
+chrono = "0.4.23"
+clap = { version = "4.0.24", features = ["cargo"] }
 derive_more = "0.99.17"
-home = "0.5.3"
+home = "0.5.4"
 lazy_static = "1.4.0"
-log = "0.4.14"
-micro-timer = "0.4.0"
-regex = "1.5.5"
-env_logger = "0.9.0"
+log = "0.4.17"
+logging_timer = "1.1.0"
+regex = "1.7.0"
+env_logger = "0.9.3"
 format-bytes = "0.3.0"
 users = "0.11.0"
-which = "4.2.5"
+which = "4.3.0"
 rayon = "1.6.1"
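
The clap 2 to clap 4 bump above drives most of the rhg command changes that
follow: `Arg::with_name` becomes `Arg::new`, short options become `char`s,
`takes_value(true)` is the default, boolean flags need an explicit
`ArgAction::SetTrue`, and values are read back with `get_one`/`get_flag`
instead of `value_of`/`is_present`. A minimal sketch of the new shape
(command and argument names are illustrative):

    use clap::{Arg, ArgAction, Command};

    fn cli() -> Command {
        Command::new("example")
            .arg(
                // Takes a value by default in clap 4; no `takes_value(true)`.
                Arg::new("rev").short('r').long("rev").value_name("REV"),
            )
            .arg(
                // Boolean flags now need an explicit `SetTrue` action.
                Arg::new("verbose")
                    .short('v')
                    .long("verbose")
                    .action(ArgAction::SetTrue),
            )
    }

    fn main() {
        let matches = cli().get_matches_from(["example", "-v", "-r", "tip"]);
        let rev = matches.get_one::<String>("rev"); // Option<&String>
        let verbose = matches.get_flag("verbose"); // bool
        println!("rev={:?} verbose={}", rev, verbose);
    }
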
--- a/rust/rhg/src/color.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/color.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -205,16 +205,14 @@
             return Err(HgError::unsupported("debug color mode"));
         }
         let auto = enabled == b"auto";
-        let always;
-        if !auto {
+        let always = if !auto {
             let enabled_bool = config.get_bool(b"ui", b"color")?;
             if !enabled_bool {
                 return Ok(None);
             }
-            always = enabled == b"always"
-                || *origin == ConfigOrigin::CommandLineColor
+            enabled == b"always" || *origin == ConfigOrigin::CommandLineColor
         } else {
-            always = false
+            false
         };
         let formatted = always
             || (std::env::var_os("TERM").unwrap_or_default() != "dumb"
@@ -245,11 +243,8 @@
 impl ColorConfig {
     // Similar to _modesetup in mercurial/color.py
     pub fn new(config: &Config) -> Result<Option<Self>, HgError> {
-        Ok(match ColorMode::get(config)? {
-            None => None,
-            Some(ColorMode::Ansi) => Some(ColorConfig {
-                styles: effects_from_config(config),
-            }),
-        })
+        Ok(ColorMode::get(config)?.map(|ColorMode::Ansi| ColorConfig {
+            styles: effects_from_config(config),
+        }))
     }
 }
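
`ColorConfig::new` above leans on the fact that `ColorMode` currently has a
single variant: a one-variant enum pattern is irrefutable, so it can be
destructured directly in the closure's parameter position. A toy sketch of
the same trick:

    enum Mode {
        Ansi,
    }

    fn main() {
        let configured = Some(Mode::Ansi);
        // Irrefutable: `Mode` has exactly one variant, so this cannot fail.
        let name = configured.map(|Mode::Ansi| "ansi");
        assert_eq!(name, Some("ansi"));
    }
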
--- a/rust/rhg/src/commands/cat.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/cat.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -3,35 +3,34 @@
 use format_bytes::format_bytes;
 use hg::operations::cat;
 use hg::utils::hg_path::HgPathBuf;
-use micro_timer::timed;
-use std::convert::TryFrom;
+use std::ffi::OsString;
+use std::os::unix::prelude::OsStrExt;
 
 pub const HELP_TEXT: &str = "
 Output the current or given revision of files
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    clap::SubCommand::with_name("cat")
+pub fn args() -> clap::Command {
+    clap::command!("cat")
         .arg(
-            Arg::with_name("rev")
+            Arg::new("rev")
                 .help("search the repository as it is in REV")
-                .short("-r")
-                .long("--rev")
-                .value_name("REV")
-                .takes_value(true),
+                .short('r')
+                .long("rev")
+                .value_name("REV"),
         )
         .arg(
-            clap::Arg::with_name("files")
+            clap::Arg::new("files")
                 .required(true)
-                .multiple(true)
-                .empty_values(false)
+                .num_args(1..)
                 .value_name("FILE")
+                .value_parser(clap::value_parser!(std::ffi::OsString))
                 .help("Files to output"),
         )
         .about(HELP_TEXT)
 }
 
-#[timed]
+#[logging_timer::time("trace")]
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
     let cat_enabled_default = true;
     let cat_enabled = invocation.config.get_option(b"rhg", b"cat")?;
@@ -42,11 +41,15 @@
         ));
     }
 
-    let rev = invocation.subcommand_args.value_of("rev");
-    let file_args = match invocation.subcommand_args.values_of("files") {
-        Some(files) => files.collect(),
-        None => vec![],
-    };
+    let rev = invocation.subcommand_args.get_one::<String>("rev");
+    let file_args =
+        match invocation.subcommand_args.get_many::<OsString>("files") {
+            Some(files) => files
+                .filter(|s| !s.is_empty())
+                .map(|s| s.as_os_str())
+                .collect(),
+            None => vec![],
+        };
 
     let repo = invocation.repo?;
     let cwd = hg::utils::current_dir()?;
@@ -54,8 +57,8 @@
     let working_directory = cwd.join(working_directory); // Make it absolute
 
     let mut files = vec![];
-    for file in file_args.iter() {
-        if file.starts_with("set:") {
+    for file in file_args {
+        if file.as_bytes().starts_with(b"set:") {
             let message = "fileset";
             return Err(CommandError::unsupported(message));
         }
@@ -63,7 +66,7 @@
         let normalized = cwd.join(&file);
         // TODO: actually normalize `..` path segments etc?
         let dotted = normalized.components().any(|c| c.as_os_str() == "..");
-        if file == &"." || dotted {
+        if file.as_bytes() == b"." || dotted {
             let message = "`..` or `.` path segment";
             return Err(CommandError::unsupported(message));
         }
@@ -75,7 +78,7 @@
             .map_err(|_| {
                 CommandError::abort(format!(
                     "abort: {} not under root '{}'\n(consider using '--cwd {}')",
-                    file,
+                    String::from_utf8_lossy(file.as_bytes()),
                     working_directory.display(),
                     relative_path.display(),
                 ))
@@ -92,7 +95,7 @@
         None => format!("{:x}", repo.dirstate_parents()?.p1),
     };
 
-    let output = cat(&repo, &rev, files).map_err(|e| (e, rev.as_str()))?;
+    let output = cat(repo, &rev, files).map_err(|e| (e, rev.as_str()))?;
     for (_file, contents) in output.results {
         invocation.ui.write_stdout(&contents)?;
     }
--- a/rust/rhg/src/commands/config.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/config.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -8,14 +8,13 @@
 With one argument of the form section.name, print just the value of that config item.
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    clap::SubCommand::with_name("config")
+pub fn args() -> clap::Command {
+    clap::command!("config")
         .arg(
-            Arg::with_name("name")
+            Arg::new("name")
                 .help("the section.name to print")
                 .value_name("NAME")
-                .required(true)
-                .takes_value(true),
+                .required(true),
         )
         .about(HELP_TEXT)
 }
@@ -23,7 +22,7 @@
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
     let (section, name) = invocation
         .subcommand_args
-        .value_of("name")
+        .get_one::<String>("name")
         .expect("missing required CLI argument")
         .as_bytes()
         .split_2(b'.')
--- a/rust/rhg/src/commands/debugdata.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/debugdata.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -2,33 +2,32 @@
 use clap::Arg;
 use clap::ArgGroup;
 use hg::operations::{debug_data, DebugDataKind};
-use micro_timer::timed;
 
 pub const HELP_TEXT: &str = "
 Dump the contents of a data file revision
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    clap::SubCommand::with_name("debugdata")
+pub fn args() -> clap::Command {
+    clap::command!("debugdata")
         .arg(
-            Arg::with_name("changelog")
+            Arg::new("changelog")
                 .help("open changelog")
-                .short("-c")
-                .long("--changelog"),
+                .short('c')
+                .action(clap::ArgAction::SetTrue),
         )
         .arg(
-            Arg::with_name("manifest")
+            Arg::new("manifest")
                 .help("open manifest")
-                .short("-m")
-                .long("--manifest"),
+                .short('m')
+                .action(clap::ArgAction::SetTrue),
         )
         .group(
-            ArgGroup::with_name("")
+            ArgGroup::new("revlog")
                 .args(&["changelog", "manifest"])
                 .required(true),
         )
         .arg(
-            Arg::with_name("rev")
+            Arg::new("rev")
                 .help("revision")
                 .required(true)
                 .value_name("REV"),
@@ -36,23 +35,25 @@
         .about(HELP_TEXT)
 }
 
-#[timed]
+#[logging_timer::time("trace")]
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
     let args = invocation.subcommand_args;
     let rev = args
-        .value_of("rev")
+        .get_one::<String>("rev")
         .expect("rev should be a required argument");
-    let kind =
-        match (args.is_present("changelog"), args.is_present("manifest")) {
-            (true, false) => DebugDataKind::Changelog,
-            (false, true) => DebugDataKind::Manifest,
-            (true, true) => {
-                unreachable!("Should not happen since options are exclusive")
-            }
-            (false, false) => {
-                unreachable!("Should not happen since options are required")
-            }
-        };
+    let kind = match (
+        args.get_one::<bool>("changelog").unwrap(),
+        args.get_one::<bool>("manifest").unwrap(),
+    ) {
+        (true, false) => DebugDataKind::Changelog,
+        (false, true) => DebugDataKind::Manifest,
+        (true, true) => {
+            unreachable!("Should not happen since options are exclusive")
+        }
+        (false, false) => {
+            unreachable!("Should not happen since options are required")
+        }
+    };
 
     let repo = invocation.repo?;
     if repo.has_narrow() {
@@ -60,7 +61,7 @@
             "support for ellipsis nodes is missing and repo has narrow enabled",
         ));
     }
-    let data = debug_data(repo, rev, kind).map_err(|e| (e, rev))?;
+    let data = debug_data(repo, rev, kind).map_err(|e| (e, rev.as_ref()))?;
 
     let mut stdout = invocation.ui.stdout_buffer();
     stdout.write_all(&data)?;
--- a/rust/rhg/src/commands/debugignorerhg.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/debugignorerhg.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,5 +1,4 @@
 use crate::error::CommandError;
-use clap::SubCommand;
 use hg;
 use hg::matchers::get_ignore_matcher;
 use hg::StatusError;
@@ -13,8 +12,8 @@
 Some options might be missing, check the list below.
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    SubCommand::with_name("debugignorerhg").about(HELP_TEXT)
+pub fn args() -> clap::Command {
+    clap::command!("debugignorerhg").about(HELP_TEXT)
 }
 
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
@@ -24,10 +23,10 @@
 
     let (ignore_matcher, warnings) = get_ignore_matcher(
         vec![ignore_file],
-        &repo.working_directory_path().to_owned(),
+        repo.working_directory_path(),
         &mut |_source, _pattern_bytes| (),
     )
-    .map_err(|e| StatusError::from(e))?;
+    .map_err(StatusError::from)?;
 
     if !warnings.is_empty() {
         warn!("Pattern warnings: {:?}", &warnings);
--- a/rust/rhg/src/commands/debugrequirements.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/debugrequirements.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -4,8 +4,8 @@
 Print the current repo requirements.
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    clap::SubCommand::with_name("debugrequirements").about(HELP_TEXT)
+pub fn args() -> clap::Command {
+    clap::command!("debugrequirements").about(HELP_TEXT)
 }
 
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
--- a/rust/rhg/src/commands/debugrhgsparse.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/debugrhgsparse.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,19 +1,21 @@
-use std::os::unix::prelude::OsStrExt;
+use std::{
+    ffi::{OsStr, OsString},
+    os::unix::prelude::OsStrExt,
+};
 
 use crate::error::CommandError;
-use clap::SubCommand;
 use hg::{self, utils::hg_path::HgPath};
 
 pub const HELP_TEXT: &str = "";
 
-pub fn args() -> clap::App<'static, 'static> {
-    SubCommand::with_name("debugrhgsparse")
+pub fn args() -> clap::Command {
+    clap::command!("debugrhgsparse")
         .arg(
-            clap::Arg::with_name("files")
+            clap::Arg::new("files")
+                .value_name("FILES")
                 .required(true)
-                .multiple(true)
-                .empty_values(false)
-                .value_name("FILES")
+                .num_args(1..)
+                .value_parser(clap::value_parser!(std::ffi::OsString))
                 .help("Files to check against sparse profile"),
         )
         .about(HELP_TEXT)
@@ -22,9 +24,13 @@
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
     let repo = invocation.repo?;
 
-    let (matcher, _warnings) = hg::sparse::matcher(&repo).unwrap();
-    let files = invocation.subcommand_args.values_of_os("files");
+    let (matcher, _warnings) = hg::sparse::matcher(repo).unwrap();
+    let files = invocation.subcommand_args.get_many::<OsString>("files");
     if let Some(files) = files {
+        let files: Vec<&OsStr> = files
+            .filter(|s| !s.is_empty())
+            .map(|s| s.as_os_str())
+            .collect();
         for file in files {
             invocation.ui.write_stdout(b"matches: ")?;
             invocation.ui.write_stdout(
--- a/rust/rhg/src/commands/files.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/files.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,12 +1,13 @@
 use crate::error::CommandError;
-use crate::ui::Ui;
+use crate::ui::{print_narrow_sparse_warnings, Ui};
 use crate::utils::path_utils::RelativizePaths;
 use clap::Arg;
-use hg::errors::HgError;
+use hg::narrow;
 use hg::operations::list_rev_tracked_files;
-use hg::operations::Dirstate;
 use hg::repo::Repo;
+use hg::utils::filter_map_results;
 use hg::utils::hg_path::HgPath;
+use rayon::prelude::*;
 
 pub const HELP_TEXT: &str = "
 List tracked files.
@@ -14,15 +15,14 @@
 Returns 0 on success.
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    clap::SubCommand::with_name("files")
+pub fn args() -> clap::Command {
+    clap::command!("files")
         .arg(
-            Arg::with_name("rev")
+            Arg::new("rev")
                 .help("search the repository as it is in REV")
-                .short("-r")
-                .long("--revision")
-                .value_name("REV")
-                .takes_value(true),
+                .short('r')
+                .long("revision")
+                .value_name("REV"),
         )
         .about(HELP_TEXT)
 }
@@ -35,7 +35,7 @@
         ));
     }
 
-    let rev = invocation.subcommand_args.value_of("rev");
+    let rev = invocation.subcommand_args.get_one::<String>("rev");
 
     let repo = invocation.repo?;
 
@@ -51,36 +51,45 @@
         ));
     }
 
+    let (narrow_matcher, narrow_warnings) = narrow::matcher(repo)?;
+    print_narrow_sparse_warnings(&narrow_warnings, &[], invocation.ui, repo)?;
+
     if let Some(rev) = rev {
-        if repo.has_narrow() {
-            return Err(CommandError::unsupported(
-                "rhg files -r <rev> is not supported in narrow clones",
-            ));
-        }
-        let files = list_rev_tracked_files(repo, rev).map_err(|e| (e, rev))?;
+        let files = list_rev_tracked_files(repo, rev, narrow_matcher)
+            .map_err(|e| (e, rev.as_ref()))?;
         display_files(invocation.ui, repo, files.iter())
     } else {
-        // The dirstate always reflects the sparse narrowspec, so if
-        // we only have sparse without narrow all is fine.
-        // If we have narrow, then [hg files] needs to check if
-        // the store narrowspec is in sync with the one of the dirstate,
-        // so we can't support that without explicit code.
-        if repo.has_narrow() {
-            return Err(CommandError::unsupported(
-                "rhg files is not supported in narrow clones",
-            ));
-        }
-        let distate = Dirstate::new(repo)?;
-        let files = distate.tracked_files()?;
-        display_files(invocation.ui, repo, files.into_iter().map(Ok))
+        // The dirstate always reflects the sparse narrowspec.
+        let dirstate = repo.dirstate_map()?;
+        let files_res: Result<Vec<_>, _> =
+            filter_map_results(dirstate.iter(), |(path, entry)| {
+                Ok(if entry.tracked() && narrow_matcher.matches(path) {
+                    Some(path)
+                } else {
+                    None
+                })
+            })
+            .collect();
+
+        let mut files = files_res?;
+        files.par_sort_unstable();
+
+        display_files(
+            invocation.ui,
+            repo,
+            files.into_iter().map::<Result<_, CommandError>, _>(Ok),
+        )
     }
 }
 
-fn display_files<'a>(
+fn display_files<'a, E>(
     ui: &Ui,
     repo: &Repo,
-    files: impl IntoIterator<Item = Result<&'a HgPath, HgError>>,
-) -> Result<(), CommandError> {
+    files: impl IntoIterator<Item = Result<&'a HgPath, E>>,
+) -> Result<(), CommandError>
+where
+    CommandError: From<E>,
+{
     let mut stdout = ui.stdout_buffer();
     let mut any = false;
 
--- a/rust/rhg/src/commands/root.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/root.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -9,8 +9,8 @@
 Returns 0 on success.
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    clap::SubCommand::with_name("root").about(HELP_TEXT)
+pub fn args() -> clap::Command {
+    clap::command!("root").about(HELP_TEXT)
 }
 
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
--- a/rust/rhg/src/commands/status.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/commands/status.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -6,9 +6,11 @@
 // GNU General Public License version 2 or any later version.
 
 use crate::error::CommandError;
-use crate::ui::Ui;
+use crate::ui::{
+    format_pattern_file_warning, print_narrow_sparse_warnings, Ui,
+};
 use crate::utils::path_utils::RelativizePaths;
-use clap::{Arg, SubCommand};
+use clap::Arg;
 use format_bytes::format_bytes;
 use hg::config::Config;
 use hg::dirstate::has_exec_bit;
@@ -21,7 +23,6 @@
 use hg::repo::Repo;
 use hg::utils::debug::debug_wait_for_file;
 use hg::utils::files::get_bytes_from_os_string;
-use hg::utils::files::get_bytes_from_path;
 use hg::utils::files::get_path_from_bytes;
 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
 use hg::DirstateStatus;
@@ -42,75 +43,86 @@
 Some options might be missing, check the list below.
 ";
 
-pub fn args() -> clap::App<'static, 'static> {
-    SubCommand::with_name("status")
+pub fn args() -> clap::Command {
+    clap::command!("status")
         .alias("st")
         .about(HELP_TEXT)
         .arg(
-            Arg::with_name("all")
+            Arg::new("all")
                 .help("show status of all files")
-                .short("-A")
-                .long("--all"),
+                .short('A')
+                .action(clap::ArgAction::SetTrue)
+                .long("all"),
         )
         .arg(
-            Arg::with_name("modified")
+            Arg::new("modified")
                 .help("show only modified files")
-                .short("-m")
-                .long("--modified"),
+                .short('m')
+                .action(clap::ArgAction::SetTrue)
+                .long("modified"),
         )
         .arg(
-            Arg::with_name("added")
+            Arg::new("added")
                 .help("show only added files")
-                .short("-a")
-                .long("--added"),
+                .short('a')
+                .action(clap::ArgAction::SetTrue)
+                .long("added"),
         )
         .arg(
-            Arg::with_name("removed")
+            Arg::new("removed")
                 .help("show only removed files")
-                .short("-r")
-                .long("--removed"),
+                .short('r')
+                .action(clap::ArgAction::SetTrue)
+                .long("removed"),
         )
         .arg(
-            Arg::with_name("clean")
+            Arg::new("clean")
                 .help("show only clean files")
-                .short("-c")
-                .long("--clean"),
+                .short('c')
+                .action(clap::ArgAction::SetTrue)
+                .long("clean"),
         )
         .arg(
-            Arg::with_name("deleted")
+            Arg::new("deleted")
                 .help("show only deleted files")
-                .short("-d")
-                .long("--deleted"),
+                .short('d')
+                .action(clap::ArgAction::SetTrue)
+                .long("deleted"),
         )
         .arg(
-            Arg::with_name("unknown")
+            Arg::new("unknown")
                 .help("show only unknown (not tracked) files")
-                .short("-u")
-                .long("--unknown"),
+                .short('u')
+                .action(clap::ArgAction::SetTrue)
+                .long("unknown"),
         )
         .arg(
-            Arg::with_name("ignored")
+            Arg::new("ignored")
                 .help("show only ignored files")
-                .short("-i")
-                .long("--ignored"),
+                .short('i')
+                .action(clap::ArgAction::SetTrue)
+                .long("ignored"),
         )
         .arg(
-            Arg::with_name("copies")
+            Arg::new("copies")
                 .help("show source of copied files (DEFAULT: ui.statuscopies)")
-                .short("-C")
-                .long("--copies"),
+                .short('C')
+                .action(clap::ArgAction::SetTrue)
+                .long("copies"),
         )
         .arg(
-            Arg::with_name("no-status")
+            Arg::new("no-status")
                 .help("hide status prefix")
-                .short("-n")
-                .long("--no-status"),
+                .short('n')
+                .action(clap::ArgAction::SetTrue)
+                .long("no-status"),
         )
         .arg(
-            Arg::with_name("verbose")
+            Arg::new("verbose")
                 .help("enable additional output")
-                .short("-v")
-                .long("--verbose"),
+                .short('v')
+                .action(clap::ArgAction::SetTrue)
+                .long("verbose"),
         )
 }
 
@@ -159,7 +171,7 @@
 }
 
 fn has_unfinished_merge(repo: &Repo) -> Result<bool, CommandError> {
-    return Ok(repo.dirstate_parents()?.is_merge());
+    Ok(repo.dirstate_parents()?.is_merge())
 }
 
 fn has_unfinished_state(repo: &Repo) -> Result<bool, CommandError> {
@@ -182,7 +194,7 @@
             return Ok(true);
         }
     }
-    return Ok(false);
+    Ok(false)
 }
 
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
@@ -201,25 +213,25 @@
     let config = invocation.config;
     let args = invocation.subcommand_args;
 
-    let verbose = !args.is_present("print0")
-        && (args.is_present("verbose")
-            || config.get_bool(b"ui", b"verbose")?
-            || config.get_bool(b"commands", b"status.verbose")?);
+    // TODO add `!args.get_flag("print0") &&` when we support `print0`
+    let verbose = args.get_flag("verbose")
+        || config.get_bool(b"ui", b"verbose")?
+        || config.get_bool(b"commands", b"status.verbose")?;
 
-    let all = args.is_present("all");
+    let all = args.get_flag("all");
     let display_states = if all {
         // TODO when implementing `--quiet`: it excludes clean files
         // from `--all`
         ALL_DISPLAY_STATES
     } else {
         let requested = DisplayStates {
-            modified: args.is_present("modified"),
-            added: args.is_present("added"),
-            removed: args.is_present("removed"),
-            clean: args.is_present("clean"),
-            deleted: args.is_present("deleted"),
-            unknown: args.is_present("unknown"),
-            ignored: args.is_present("ignored"),
+            modified: args.get_flag("modified"),
+            added: args.get_flag("added"),
+            removed: args.get_flag("removed"),
+            clean: args.get_flag("clean"),
+            deleted: args.get_flag("deleted"),
+            unknown: args.get_flag("unknown"),
+            ignored: args.get_flag("ignored"),
         };
         if requested.is_empty() {
             DEFAULT_DISPLAY_STATES
@@ -227,27 +239,25 @@
             requested
         }
     };
-    let no_status = args.is_present("no-status");
+    let no_status = args.get_flag("no-status");
     let list_copies = all
-        || args.is_present("copies")
+        || args.get_flag("copies")
         || config.get_bool(b"ui", b"statuscopies")?;
 
     let repo = invocation.repo?;
 
-    if verbose {
-        if has_unfinished_state(repo)? {
-            return Err(CommandError::unsupported(
-                "verbose status output is not supported by rhg (and is needed because we're in an unfinished operation)",
-            ));
-        };
+    if verbose && has_unfinished_state(repo)? {
+        return Err(CommandError::unsupported(
+            "verbose status output is not supported by rhg (and is needed because we're in an unfinished operation)",
+        ));
     }
 
     let mut dmap = repo.dirstate_map_mut()?;
 
+    let check_exec = hg::checkexec::check_exec(repo.working_directory_path());
+
     let options = StatusOptions {
-        // we're currently supporting file systems with exec flags only
-        // anyway
-        check_exec: true,
+        check_exec,
         list_clean: display_states.clean,
         list_unknown: display_states.unknown,
         list_ignored: display_states.ignored,
@@ -261,7 +271,7 @@
     let after_status = |res: StatusResult| -> Result<_, CommandError> {
         let (mut ds_status, pattern_warnings) = res?;
         for warning in pattern_warnings {
-            ui.write_stderr(&print_pattern_file_warning(&warning, &repo))?;
+            ui.write_stderr(&format_pattern_file_warning(&warning, repo))?;
         }
 
         for (path, error) in ds_status.bad {
@@ -306,6 +316,7 @@
                     match unsure_is_modified(
                         working_directory_vfs,
                         store_vfs,
+                        check_exec,
                         &manifest,
                         &to_check.path,
                     ) {
@@ -396,31 +407,12 @@
         (false, false) => Box::new(AlwaysMatcher),
     };
 
-    for warning in narrow_warnings.into_iter().chain(sparse_warnings) {
-        match &warning {
-            sparse::SparseWarning::RootWarning { context, line } => {
-                let msg = format_bytes!(
-                    b"warning: {} profile cannot use paths \"
-                    starting with /, ignoring {}\n",
-                    context,
-                    line
-                );
-                ui.write_stderr(&msg)?;
-            }
-            sparse::SparseWarning::ProfileNotFound { profile, rev } => {
-                let msg = format_bytes!(
-                    b"warning: sparse profile '{}' not found \"
-                    in rev {} - ignoring it\n",
-                    profile,
-                    rev
-                );
-                ui.write_stderr(&msg)?;
-            }
-            sparse::SparseWarning::Pattern(e) => {
-                ui.write_stderr(&print_pattern_file_warning(e, &repo))?;
-            }
-        }
-    }
+    print_narrow_sparse_warnings(
+        &narrow_warnings,
+        &sparse_warnings,
+        ui,
+        repo,
+    )?;
     let (fixup, mut dirstate_write_needed, filesystem_time_at_status_start) =
         dmap.with_status(
             matcher.as_ref(),
@@ -432,7 +424,7 @@
 
     // Development config option to test write races
     if let Err(e) =
-        debug_wait_for_file(&config, "status.pre-dirstate-write-file")
+        debug_wait_for_file(config, "status.pre-dirstate-write-file")
     {
         ui.write_stderr(e.as_bytes()).ok();
     }
@@ -594,6 +586,7 @@
 fn unsure_is_modified(
     working_directory_vfs: hg::vfs::Vfs,
     store_vfs: hg::vfs::Vfs,
+    check_exec: bool,
     manifest: &Manifest,
     hg_path: &HgPath,
 ) -> Result<UnsureOutcome, HgError> {
@@ -601,20 +594,30 @@
     let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
     let fs_metadata = vfs.symlink_metadata(&fs_path)?;
     let is_symlink = fs_metadata.file_type().is_symlink();
+
+    let entry = manifest
+        .find_by_path(hg_path)?
+        .expect("ambgious file not in p1");
+
     // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
     // dirstate
     let fs_flags = if is_symlink {
         Some(b'l')
-    } else if has_exec_bit(&fs_metadata) {
+    } else if check_exec && has_exec_bit(&fs_metadata) {
         Some(b'x')
     } else {
         None
     };
 
-    let entry = manifest
-        .find_by_path(hg_path)?
-        .expect("ambgious file not in p1");
-    if entry.flags != fs_flags {
+    let entry_flags = if check_exec {
+        entry.flags
+    } else if entry.flags == Some(b'x') {
+        None
+    } else {
+        entry.flags
+    };
+
+    if entry_flags != fs_flags {
         return Ok(UnsureOutcome::Modified);
     }
     let filelog = hg::filelog::Filelog::open_vfs(&store_vfs, hg_path)?;
@@ -622,8 +625,8 @@
     let file_node = entry.node_id()?;
     let filelog_entry = filelog.entry_for_node(file_node).map_err(|_| {
         HgError::corrupted(format!(
-            "filelog missing node {:?} from manifest",
-            file_node
+            "filelog {:?} missing node {:?} from manifest",
+            hg_path, file_node
         ))
     })?;
     if filelog_entry.file_data_len_not_equal_to(fs_len) {
@@ -652,30 +655,3 @@
         UnsureOutcome::Clean
     })
 }
-
-fn print_pattern_file_warning(
-    warning: &PatternFileWarning,
-    repo: &Repo,
-) -> Vec<u8> {
-    match warning {
-        PatternFileWarning::InvalidSyntax(path, syntax) => format_bytes!(
-            b"{}: ignoring invalid syntax '{}'\n",
-            get_bytes_from_path(path),
-            &*syntax
-        ),
-        PatternFileWarning::NoSuchFile(path) => {
-            let path = if let Ok(relative) =
-                path.strip_prefix(repo.working_directory_path())
-            {
-                relative
-            } else {
-                &*path
-            };
-            format_bytes!(
-                b"skipping unreadable pattern file '{}': \
-                    No such file or directory\n",
-                get_bytes_from_path(path),
-            )
-        }
-    }
-}
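
The `check_exec` parameter threaded through `unsure_is_modified` above matters on filesystems that cannot represent the executable bit: there the on-disk exec bit is meaningless, so a manifest entry's `x` flag has to be discounted before flags are compared. A standalone Python sketch of that comparison (illustrative only, not the rhg code):

    # `entry_flags` mirrors a manifest entry's flags field: b'l', b'x' or None.
    def flags_differ(entry_flags, is_symlink, has_exec_bit, check_exec):
        # Compute the flags as observed on the filesystem.
        if is_symlink:
            fs_flags = b'l'
        elif check_exec and has_exec_bit:
            fs_flags = b'x'
        else:
            fs_flags = None
        # On a filesystem that cannot record the exec bit, b'x' in the
        # manifest carries no usable information, so normalize it away.
        if not check_exec and entry_flags == b'x':
            entry_flags = None
        return entry_flags != fs_flags

    assert not flags_differ(b'x', False, False, check_exec=False)
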
--- a/rust/rhg/src/error.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/error.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -7,7 +7,7 @@
 use hg::errors::HgError;
 use hg::exit_codes;
 use hg::repo::RepoError;
-use hg::revlog::revlog::RevlogError;
+use hg::revlog::RevlogError;
 use hg::sparse::SparseConfigError;
 use hg::utils::files::get_bytes_from_path;
 use hg::{DirstateError, DirstateMapError, StatusError};
@@ -50,7 +50,7 @@
             // of error messages to handle non-UTF-8 filenames etc:
             // https://www.mercurial-scm.org/wiki/EncodingStrategy#Mixing_output
             message: utf8_to_local(message.as_ref()).into(),
-            detailed_exit_code: detailed_exit_code,
+            detailed_exit_code,
             hint: None,
         }
     }
--- a/rust/rhg/src/main.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/main.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,10 +1,7 @@
 extern crate log;
 use crate::error::CommandError;
 use crate::ui::{local_to_utf8, Ui};
-use clap::App;
-use clap::AppSettings;
-use clap::Arg;
-use clap::ArgMatches;
+use clap::{command, Arg, ArgMatches};
 use format_bytes::{format_bytes, join};
 use hg::config::{Config, ConfigSource, PlainInfo};
 use hg::repo::{Repo, RepoError};
@@ -35,55 +32,47 @@
 ) -> Result<(), CommandError> {
     check_unsupported(config, repo)?;
 
-    let app = App::new("rhg")
-        .global_setting(AppSettings::AllowInvalidUtf8)
-        .global_setting(AppSettings::DisableVersion)
-        .setting(AppSettings::SubcommandRequired)
-        .setting(AppSettings::VersionlessSubcommands)
+    let app = command!()
+        .subcommand_required(true)
         .arg(
-            Arg::with_name("repository")
+            Arg::new("repository")
                 .help("repository root directory")
-                .short("-R")
-                .long("--repository")
+                .short('R')
                 .value_name("REPO")
-                .takes_value(true)
                 // Both ok: `hg -R ./foo log` or `hg log -R ./foo`
                 .global(true),
         )
         .arg(
-            Arg::with_name("config")
+            Arg::new("config")
                 .help("set/override config option (use 'section.name=value')")
-                .long("--config")
                 .value_name("CONFIG")
-                .takes_value(true)
                 .global(true)
+                .long("config")
                 // Ok: `--config section.key1=val --config section.key2=val2`
-                .multiple(true)
                 // Not ok: `--config section.key1=val section.key2=val2`
-                .number_of_values(1),
+                .action(clap::ArgAction::Append),
         )
         .arg(
-            Arg::with_name("cwd")
+            Arg::new("cwd")
                 .help("change working directory")
-                .long("--cwd")
                 .value_name("DIR")
-                .takes_value(true)
+                .long("cwd")
                 .global(true),
         )
         .arg(
-            Arg::with_name("color")
+            Arg::new("color")
                 .help("when to colorize (boolean, always, auto, never, or debug)")
-                .long("--color")
                 .value_name("TYPE")
-                .takes_value(true)
+                .long("color")
                 .global(true),
         )
         .version("0.0.1");
     let app = add_subcommand_args(app);
 
-    let matches = app.clone().get_matches_from_safe(argv.iter())?;
+    let matches = app.try_get_matches_from(argv.iter())?;
 
-    let (subcommand_name, subcommand_matches) = matches.subcommand();
+    let (subcommand_name, subcommand_args) =
+        matches.subcommand().expect("subcommand required");
 
     // Mercurial allows users to define "defaults" for commands, fallback
     // if a default is detected for the current command
@@ -104,9 +93,7 @@
         }
     }
     let run = subcommand_run_fn(subcommand_name)
-        .expect("unknown subcommand name from clap despite AppSettings::SubcommandRequired");
-    let subcommand_args = subcommand_matches
-        .expect("no subcommand arguments from clap despite AppSettings::SubcommandRequired");
+        .expect("unknown subcommand name from clap despite Command::subcommand_required");
 
     let invocation = CliInvocation {
         ui,
@@ -216,7 +203,7 @@
                 // Same as `_matchscheme` in `mercurial/util.py`
                 regex::bytes::Regex::new("^[a-zA-Z0-9+.\\-]+:").unwrap();
         }
-        if SCHEME_RE.is_match(&repo_path_bytes) {
+        if SCHEME_RE.is_match(repo_path_bytes) {
             exit(
                 &argv,
                 &initial_current_dir,
@@ -236,7 +223,7 @@
             )
         }
     }
-    let repo_arg = early_args.repo.unwrap_or(Vec::new());
+    let repo_arg = early_args.repo.unwrap_or_default();
     let repo_path: Option<PathBuf> = {
         if repo_arg.is_empty() {
             None
@@ -267,7 +254,7 @@
             let non_repo_config_val = {
                 let non_repo_val = non_repo_config.get(b"paths", &repo_arg);
                 match &non_repo_val {
-                    Some(val) if val.len() > 0 => home::home_dir()
+                    Some(val) if !val.is_empty() => home::home_dir()
                         .unwrap_or_else(|| PathBuf::from("~"))
                         .join(get_path_from_bytes(val))
                         .canonicalize()
@@ -283,7 +270,7 @@
                 Some(val) => {
                     let local_config_val = val.get(b"paths", &repo_arg);
                     match &local_config_val {
-                        Some(val) if val.len() > 0 => {
+                        Some(val) if !val.is_empty() => {
                             // presence of a local_config assures that
                             // current_dir
                             // won't result in an Error
@@ -297,7 +284,8 @@
                     }
                 }
             };
-            config_val.or(Some(get_path_from_bytes(&repo_arg).to_path_buf()))
+            config_val
+                .or_else(|| Some(get_path_from_bytes(&repo_arg).to_path_buf()))
         }
     };
 
@@ -317,7 +305,7 @@
             )
         };
     let early_exit = |config: &Config, error: CommandError| -> ! {
-        simple_exit(&Ui::new_infallible(config), &config, Err(error))
+        simple_exit(&Ui::new_infallible(config), config, Err(error))
     };
     let repo_result = match Repo::find(&non_repo_config, repo_path.to_owned())
     {
@@ -341,13 +329,13 @@
         && config_cow
             .as_ref()
             .get_bool(b"ui", b"tweakdefaults")
-            .unwrap_or_else(|error| early_exit(&config, error.into()))
+            .unwrap_or_else(|error| early_exit(config, error.into()))
     {
         config_cow.to_mut().tweakdefaults()
     };
     let config = config_cow.as_ref();
-    let ui = Ui::new(&config)
-        .unwrap_or_else(|error| early_exit(&config, error.into()));
+    let ui = Ui::new(config)
+        .unwrap_or_else(|error| early_exit(config, error.into()));
 
     if let Ok(true) = config.get_bool(b"rhg", b"fallback-immediately") {
         exit(
@@ -373,7 +361,7 @@
         repo_result.as_ref(),
         config,
     );
-    simple_exit(&ui, &config, result)
+    simple_exit(&ui, config, result)
 }
 
 fn main() -> ! {
@@ -435,9 +423,9 @@
             }
             Some(executable) => executable,
         };
-        let executable_path = get_path_from_bytes(&executable);
+        let executable_path = get_path_from_bytes(executable);
         let this_executable = args.next().expect("expected argv[0] to exist");
-        if executable_path == &PathBuf::from(this_executable) {
+        if executable_path == *this_executable {
             // Avoid spawning infinitely many processes until resource
             // exhaustion.
             let _ = ui.write_stderr(&format_bytes!(
@@ -535,7 +523,7 @@
             )+
         }
 
-        fn add_subcommand_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
+        fn add_subcommand_args(app: clap::Command) -> clap::Command {
             app
             $(
                 .subcommand(commands::$command::args())
@@ -569,7 +557,7 @@
 
 pub struct CliInvocation<'a> {
     ui: &'a Ui,
-    subcommand_args: &'a ArgMatches<'a>,
+    subcommand_args: &'a ArgMatches,
     config: &'a Config,
     /// References inside `Result` are a bit peculiar but allow
     /// `invocation.repo?` to work out with `&CliInvocation` since this
@@ -752,6 +740,7 @@
 }
 
 /// Array of tuples of (auto upgrade conf, feature conf, local requirement)
+#[allow(clippy::type_complexity)]
 const AUTO_UPGRADES: &[((&str, &str), (&str, &str), &str)] = &[
     (
         ("format", "use-share-safe.automatic-upgrade-of-mismatching-repositories"),
--- a/rust/rhg/src/ui.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/ui.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -1,10 +1,15 @@
 use crate::color::ColorConfig;
 use crate::color::Effect;
+use crate::error::CommandError;
 use format_bytes::format_bytes;
 use format_bytes::write_bytes;
 use hg::config::Config;
 use hg::config::PlainInfo;
 use hg::errors::HgError;
+use hg::repo::Repo;
+use hg::sparse;
+use hg::utils::files::get_bytes_from_path;
+use hg::PatternFileWarning;
 use std::borrow::Cow;
 use std::io;
 use std::io::{ErrorKind, Write};
@@ -223,3 +228,68 @@
         atty::is(atty::Stream::Stdout)
     })
 }
+
+/// Return the formatted bytestring corresponding to a pattern file warning,
+/// as expected by the CLI.
+pub(crate) fn format_pattern_file_warning(
+    warning: &PatternFileWarning,
+    repo: &Repo,
+) -> Vec<u8> {
+    match warning {
+        PatternFileWarning::InvalidSyntax(path, syntax) => format_bytes!(
+            b"{}: ignoring invalid syntax '{}'\n",
+            get_bytes_from_path(path),
+            &*syntax
+        ),
+        PatternFileWarning::NoSuchFile(path) => {
+            let path = if let Ok(relative) =
+                path.strip_prefix(repo.working_directory_path())
+            {
+                relative
+            } else {
+                &*path
+            };
+            format_bytes!(
+                b"skipping unreadable pattern file '{}': \
+                    No such file or directory\n",
+                get_bytes_from_path(path),
+            )
+        }
+    }
+}
+
+/// Print with `Ui` the formatted bytestring corresponding to a
+/// sparse/narrow warning, as expected by the CLI.
+pub(crate) fn print_narrow_sparse_warnings(
+    narrow_warnings: &[sparse::SparseWarning],
+    sparse_warnings: &[sparse::SparseWarning],
+    ui: &Ui,
+    repo: &Repo,
+) -> Result<(), CommandError> {
+    for warning in narrow_warnings.iter().chain(sparse_warnings) {
+        match &warning {
+            sparse::SparseWarning::RootWarning { context, line } => {
+                let msg = format_bytes!(
+                    b"warning: {} profile cannot use paths \"
+                starting with /, ignoring {}\n",
+                    context,
+                    line
+                );
+                ui.write_stderr(&msg)?;
+            }
+            sparse::SparseWarning::ProfileNotFound { profile, rev } => {
+                let msg = format_bytes!(
+                    b"warning: sparse profile '{}' not found \"
+                in rev {} - ignoring it\n",
+                    profile,
+                    rev
+                );
+                ui.write_stderr(&msg)?;
+            }
+            sparse::SparseWarning::Pattern(e) => {
+                ui.write_stderr(&format_pattern_file_warning(e, repo))?;
+            }
+        }
+    }
+    Ok(())
+}
--- a/rust/rhg/src/utils/path_utils.rs	Thu Mar 02 15:21:36 2023 +0100
+++ b/rust/rhg/src/utils/path_utils.rs	Thu Mar 02 22:45:44 2023 +0100
@@ -23,7 +23,7 @@
         let repo_root = repo.working_directory_path();
         let repo_root = cwd.join(repo_root); // Make it absolute
         let repo_root_hgpath =
-            HgPathBuf::from(get_bytes_from_path(repo_root.to_owned()));
+            HgPathBuf::from(get_bytes_from_path(&repo_root));
 
         if let Ok(cwd_relative_to_repo) = cwd.strip_prefix(&repo_root) {
             // The current directory is inside the repo, so we can work with
--- a/setup.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/setup.py	Thu Mar 02 22:45:44 2023 +0100
@@ -131,11 +131,7 @@
     DistutilsError,
     DistutilsExecError,
 )
-from distutils.sysconfig import get_python_inc, get_config_var
-from distutils.version import StrictVersion
-
-# Explain to distutils.StrictVersion how our release candidates are versioned
-StrictVersion.version_re = re.compile(r'^(\d+)\.(\d+)(\.(\d+))?-?(rc(\d+))?$')
+from distutils.sysconfig import get_python_inc
 
 
 def write_if_changed(path, content):
@@ -1504,11 +1500,13 @@
         target = [target_dir]
         target.extend(self.name.split('.'))
         target[-1] += DYLIB_SUFFIX
+        target = os.path.join(*target)
+        os.makedirs(os.path.dirname(target), exist_ok=True)
         shutil.copy2(
             os.path.join(
                 self.rusttargetdir, self.dylibname + self.rustdylibsuffix()
             ),
-            os.path.join(*target),
+            target,
         )
 
 
@@ -1653,6 +1651,10 @@
     'mercurial.helptext.internals': [
         '*.txt',
     ],
+    'mercurial.thirdparty.attr': [
+        '*.pyi',
+        'py.typed',
+    ],
 }
 
 
@@ -1738,39 +1740,6 @@
     # form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
     setupversion = setupversion.split(r'+', 1)[0]
 
-if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
-    version = runcmd(['/usr/bin/xcodebuild', '-version'], {})[1].splitlines()
-    if version:
-        version = version[0].decode('utf-8')
-        xcode4 = version.startswith('Xcode') and StrictVersion(
-            version.split()[1]
-        ) >= StrictVersion('4.0')
-        xcode51 = re.match(r'^Xcode\s+5\.1', version) is not None
-    else:
-        # xcodebuild returns empty on OS X Lion with XCode 4.3 not
-        # installed, but instead with only command-line tools. Assume
-        # that only happens on >= Lion, thus no PPC support.
-        xcode4 = True
-        xcode51 = False
-
-    # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
-    # distutils.sysconfig
-    if xcode4:
-        os.environ['ARCHFLAGS'] = ''
-
-    # XCode 5.1 changes clang such that it now fails to compile if the
-    # -mno-fused-madd flag is passed, but the version of Python shipped with
-    # OS X 10.9 Mavericks includes this flag. This causes problems in all
-    # C extension modules, and a bug has been filed upstream at
-    # http://bugs.python.org/issue21244. We also need to patch this here
-    # so Mercurial can continue to compile in the meantime.
-    if xcode51:
-        cflags = get_config_var('CFLAGS')
-        if cflags and re.search(r'-mno-fused-madd\b', cflags) is not None:
-            os.environ['CFLAGS'] = (
-                os.environ.get('CFLAGS', '') + ' -Qunused-arguments'
-            )
-
 setup(
     name='mercurial',
     version=setupversion,
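
The new `os.makedirs(..., exist_ok=True)` call before `shutil.copy2` ensures the Rust dylib copy no longer fails when the destination directory is missing from a fresh build tree. The pattern in isolation (a generic sketch, not the setup.py code):

    import os
    import shutil

    def copy_into(src, target):
        # Create the destination directory first instead of letting
        # shutil.copy2 fail with FileNotFoundError.
        directory = os.path.dirname(target)
        if directory:
            os.makedirs(directory, exist_ok=True)
        shutil.copy2(src, target)
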
--- a/tests/f	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/f	Thu Mar 02 22:45:44 2023 +0100
@@ -32,17 +32,10 @@
 import re
 import sys
 
-# Python 3 adapters
-ispy3 = sys.version_info[0] >= 3
-if ispy3:
 
-    def iterbytes(s):
-        for i in range(len(s)):
-            yield s[i : i + 1]
-
-
-else:
-    iterbytes = iter
+def iterbytes(s):
+    for i in range(len(s)):
+        yield s[i : i + 1]
 
 
 def visit(opts, filenames, outfile):
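
Only the slicing variant of `iterbytes` survives the Python 2 cleanup; plain iteration cannot replace it because iterating a bytes object yields integers rather than one-byte strings:

    data = b'abc'
    # Iteration produces ints; slicing produces length-1 bytes objects.
    assert list(data) == [97, 98, 99]
    assert [data[i:i + 1] for i in range(len(data))] == [b'a', b'b', b'c']
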
--- a/tests/get-with-headers.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/get-with-headers.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
-"""This does HTTP GET requests given a host:port and path and returns
-a subset of the headers plus the body of the result."""
+"""This does HTTP requests (GET by default) given a host:port and path and
+returns a subset of the headers plus the body of the result."""
 
 
 import argparse
@@ -39,6 +39,7 @@
     'value is <header>=<value>',
 )
 parser.add_argument('--bodyfile', help='Write HTTP response body to a file')
+parser.add_argument('--method', default='GET', help='HTTP method to use')
 parser.add_argument('host')
 parser.add_argument('path')
 parser.add_argument('show', nargs='*')
@@ -54,7 +55,7 @@
 tag = None
 
 
-def request(host, path, show):
+def request(method, host, path, show):
     assert not path.startswith('/'), path
     global tag
     headers = {}
@@ -68,7 +69,7 @@
         headers[key] = value
 
     conn = httplib.HTTPConnection(host)
-    conn.request("GET", '/' + path, None, headers)
+    conn.request(method, '/' + path, None, headers)
     response = conn.getresponse()
     stdout.write(
         b'%d %s\n' % (response.status, response.reason.encode('ascii'))
@@ -121,9 +122,9 @@
     return response.status
 
 
-status = request(args.host, args.path, args.show)
+status = request(args.method, args.host, args.path, args.show)
 if twice:
-    status = request(args.host, args.path, args.show)
+    status = request(args.method, args.host, args.path, args.show)
 
 if 200 <= status <= 305:
     sys.exit(0)
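
With the new `--method` flag the script can exercise HEAD, POST, and other verbs; the method string is passed straight through to the connection. A minimal sketch of that call path using the stdlib (`localhost:8000` and `/index` are placeholders):

    import http.client

    conn = http.client.HTTPConnection('localhost:8000')
    conn.request('HEAD', '/index')  # any HTTP method name works here
    response = conn.getresponse()
    print(response.status, response.reason)
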
--- a/tests/hghave.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/hghave.py	Thu Mar 02 22:45:44 2023 +0100
@@ -27,26 +27,17 @@
 stdout = getattr(sys.stdout, 'buffer', sys.stdout)
 stderr = getattr(sys.stderr, 'buffer', sys.stderr)
 
-is_not_python2 = sys.version_info[0] >= 3
-if is_not_python2:
 
-    def _sys2bytes(p):
-        if p is None:
-            return p
-        return p.encode('utf-8')
-
-    def _bytes2sys(p):
-        if p is None:
-            return p
-        return p.decode('utf-8')
+def _sys2bytes(p):
+    if p is None:
+        return p
+    return p.encode('utf-8')
 
 
-else:
-
-    def _sys2bytes(p):
+def _bytes2sys(p):
+    if p is None:
         return p
-
-    _bytes2sys = _sys2bytes
+    return p.decode('utf-8')
 
 
 def check(name, desc):
@@ -168,8 +159,6 @@
 
 @check("bzr", "Breezy library and executable version >= 3.1")
 def has_bzr():
-    if not is_not_python2:
-        return False
     try:
         # Test the Breezy python lib
         import breezy
@@ -333,7 +322,7 @@
     fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
     os.close(fd)
     try:
-        return util.cachestat(path).cacheable()
+        return util.cachestat(_sys2bytes(path)).cacheable()
     finally:
         os.remove(path)
 
@@ -877,9 +866,9 @@
     return (not has_chg()) and os.environ.get('HGDEMANDIMPORT') != 'disable'
 
 
-# Add "py27", "py35", ... as possible feature checks. Note that there's no
+# Add "py36", "py37", ... as possible feature checks. Note that there's no
 # punctuation here.
-@checkvers("py", "Python >= %s", (2.7, 3.5, 3.6, 3.7, 3.8, 3.9, 3.10, 3.11))
+@checkvers("py", "Python >= %s", (3.6, 3.7, 3.8, 3.9, 3.10, 3.11))
 def has_python_range(v):
     major, minor = v.split('.')[0:2]
     py_major, py_minor = sys.version_info.major, sys.version_info.minor
@@ -897,7 +886,7 @@
     py = 'python3'
     if os.name == 'nt':
         py = 'py -3'
-    return matchoutput('%s -V' % py, br'^Python 3.(5|6|7|8|9|10|11)')
+    return matchoutput('%s -V' % py, br'^Python 3.(6|7|8|9|10|11)')
 
 
 @check("pure", "running with pure Python code")
--- a/tests/notcapable	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/notcapable	Thu Mar 02 22:45:44 2023 +0100
@@ -15,10 +15,10 @@
     if name in b'$CAP'.split(b' '):
         return False
     return orig(self, name, *args, **kwargs)
-def wrappeer(orig, self):
+def wrappeer(orig, self, path=None):
     # Since we're disabling some newer features, we need to make sure local
     # repos add in the legacy features again.
-    return localrepo.locallegacypeer(self)
+    return localrepo.locallegacypeer(self, path=path)
 EOF
 
 echo '[extensions]' >> $HGRCPATH
--- a/tests/remotefilelog-getflogheads.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/remotefilelog-getflogheads.py	Thu Mar 02 22:45:44 2023 +0100
@@ -19,7 +19,7 @@
     Used for testing purposes
     """
 
-    dest = urlutil.get_unique_pull_path(b'getflogheads', repo, ui)[0]
+    dest = urlutil.get_unique_pull_path_obj(b'getflogheads', ui)
     peer = hg.peer(repo, {}, dest)
 
     try:
--- a/tests/run-tests.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/run-tests.py	Thu Mar 02 22:45:44 2023 +0100
@@ -272,14 +272,11 @@
         with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
             s.bind(('localhost', port))
         return True
+    except PermissionError:
+        return False
     except socket.error as exc:
         if WINDOWS and exc.errno == errno.WSAEACCES:
             return False
-        # TODO: make a proper exception handler after dropping py2.  This
-        #       works because socket.error is an alias for OSError on py3,
-        #       which is also the baseclass of PermissionError.
-        elif isinstance(exc, PermissionError):
-            return False
         if exc.errno not in (
             errno.EADDRINUSE,
             errno.EADDRNOTAVAIL,
@@ -3289,6 +3286,18 @@
         # adds an extension to HGRC. Also include run-test.py directory to
         # import modules like heredoctest.
         pypath = [self._pythondir, self._testdir, runtestdir]
+
+        # Setting PYTHONPATH while a venv is activated causes the modules
+        # installed in it to be ignored.  Therefore, also copy the venv's
+        # sys.path entries into PYTHONPATH.
+        virtual_env = osenvironb.get(b"VIRTUAL_ENV")
+        if virtual_env:
+            virtual_env = os.path.join(virtual_env, b'')
+            for p in sys.path:
+                p = _sys2bytes(p)
+                if p.startswith(virtual_env):
+                    pypath.append(p)
+
         # We have to augment PYTHONPATH, rather than simply replacing
         # it, in case external libraries are only available via current
         # PYTHONPATH.  (In particular, the Subversion bindings on OS X
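
The simplified port-check handler earlier in this file works because `except` clauses are tried in order and `PermissionError` is the more specific type; the removed TODO pointed at exactly this relationship:

    import socket

    # On Python 3, socket.error is an alias of OSError, and
    # PermissionError subclasses OSError, so the PermissionError
    # clause must come before the socket.error one to ever fire.
    assert socket.error is OSError
    assert issubclass(PermissionError, OSError)
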
--- a/tests/test-acl.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-acl.t	Thu Mar 02 22:45:44 2023 +0100
@@ -116,11 +116,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -131,9 +131,9 @@
   adding foo/Bar/file.txt revisions
   adding foo/file.txt revisions
   adding quux/file.py revisions
-  bundle2-input-part: total payload size 1553
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
@@ -182,11 +182,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -200,9 +200,9 @@
   adding quux/file.py revisions
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: changes have source "push" - skipping
-  bundle2-input-part: total payload size 1553
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   truncating cache/rbc-revs-v1 to 8
   updating the branch cache
@@ -252,11 +252,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -280,9 +280,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
-  bundle2-input-part: total payload size 1553
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   truncating cache/rbc-revs-v1 to 8
   updating the branch cache
@@ -332,11 +332,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -356,8 +356,8 @@
   acl: acl.deny not enabled
   acl: branch access granted: "ef1ea85a6374" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -403,11 +403,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -431,8 +431,8 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -478,11 +478,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -502,8 +502,8 @@
   acl: acl.deny enabled, 0 entries for user barney
   acl: branch access granted: "ef1ea85a6374" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -550,11 +550,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -578,8 +578,8 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "fred" not allowed on "quux/file.py" (changeset "911600dab2ae")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -627,11 +627,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -653,8 +653,8 @@
   acl: path access granted: "ef1ea85a6374"
   acl: branch access granted: "f9cafe1212c8" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -701,11 +701,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -725,8 +725,8 @@
   acl: acl.deny enabled, 0 entries for user barney
   acl: branch access granted: "ef1ea85a6374" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "barney" not allowed on "foo/file.txt" (changeset "ef1ea85a6374")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -776,13 +776,13 @@
   bundle2-output-part: "bookmarks" 37 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:bookmarks" supported
-  bundle2-input-part: total payload size 37
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -798,11 +798,11 @@
   acl: acl.deny enabled, 2 entries for user fred
   acl: branch access granted: "ef1ea85a6374" on branch "default"
   acl: path access granted: "ef1ea85a6374"
-  bundle2-input-part: total payload size 520
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "bookmarks" supported
-  bundle2-input-part: total payload size 37
+  bundle2-input-part: total payload size * (glob)
   calling hook prepushkey.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.bookmarks not enabled
@@ -865,13 +865,13 @@
   bundle2-output-part: "bookmarks" 37 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:bookmarks" supported
-  bundle2-input-part: total payload size 37
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -887,11 +887,11 @@
   acl: acl.deny enabled, 2 entries for user fred
   acl: branch access granted: "ef1ea85a6374" on branch "default"
   acl: path access granted: "ef1ea85a6374"
-  bundle2-input-part: total payload size 520
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "bookmarks" supported
-  bundle2-input-part: total payload size 37
+  bundle2-input-part: total payload size * (glob)
   calling hook prepushkey.acl: hgext.acl.hook
   acl: checking access for user "fred"
   acl: acl.allow.bookmarks not enabled
@@ -954,11 +954,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -982,9 +982,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
-  bundle2-input-part: total payload size 1553
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
@@ -1040,11 +1040,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1068,8 +1068,8 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "wilma" not allowed on "quux/file.py" (changeset "911600dab2ae")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -1124,11 +1124,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1143,8 +1143,8 @@
   calling hook pretxnchangegroup.acl: hgext.acl.hook
   acl: checking access for user "barney"
   error: pretxnchangegroup.acl hook raised an exception: [Errno *] * (glob)
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -1202,11 +1202,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1230,8 +1230,8 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "betty" not allowed on "quux/file.py" (changeset "911600dab2ae")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -1291,11 +1291,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1319,9 +1319,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
-  bundle2-input-part: total payload size 1553
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
@@ -1381,11 +1381,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1409,9 +1409,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
-  bundle2-input-part: total payload size 1553
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   truncating cache/rbc-revs-v1 to 8
   updating the branch cache
@@ -1468,11 +1468,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1494,8 +1494,8 @@
   acl: path access granted: "ef1ea85a6374"
   acl: branch access granted: "f9cafe1212c8" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -1551,11 +1551,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1580,9 +1580,9 @@
   acl: path access granted: "f9cafe1212c8"
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
-  bundle2-input-part: total payload size 1553
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   added 3 changesets with 3 changes to 3 files
@@ -1638,11 +1638,11 @@
   bundle2-output-part: "phase-heads" 24 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 20
+  bundle2-input-part: total payload size * (glob)
   invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
@@ -1666,8 +1666,8 @@
   acl: path access granted: "ef1ea85a6374"
   acl: branch access granted: "f9cafe1212c8" on branch "default"
   error: pretxnchangegroup.acl hook failed: acl: user "fred" denied on "foo/Bar/file.txt" (changeset "f9cafe1212c8")
-  bundle2-input-part: total payload size 1553
-  bundle2-input-part: total payload size 24
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -1761,11 +1761,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1792,9 +1792,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
-  bundle2-input-part: total payload size 2068
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   invalid branch cache (served.hidden): tip differs
@@ -1848,11 +1848,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1878,8 +1878,8 @@
   acl: branch access granted: "911600dab2ae" on branch "default"
   acl: path access granted: "911600dab2ae"
   error: pretxnchangegroup.acl hook failed: acl: user "astro" denied on branch "foobar" (changeset "e8fc755d4d82")
-  bundle2-input-part: total payload size 2068
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -1926,11 +1926,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1950,8 +1950,8 @@
   acl: acl.allow not enabled
   acl: acl.deny not enabled
   error: pretxnchangegroup.acl hook failed: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
-  bundle2-input-part: total payload size 2068
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -2000,11 +2000,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2024,8 +2024,8 @@
   acl: acl.allow not enabled
   acl: acl.deny not enabled
   error: pretxnchangegroup.acl hook failed: acl: user "astro" not allowed on branch "default" (changeset "ef1ea85a6374")
-  bundle2-input-part: total payload size 2068
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -2068,11 +2068,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2099,9 +2099,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
-  bundle2-input-part: total payload size 2068
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   invalid branch cache (served.hidden): tip differs
@@ -2160,11 +2160,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2191,9 +2191,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
-  bundle2-input-part: total payload size 2068
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   invalid branch cache (served.hidden): tip differs
@@ -2251,11 +2251,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2275,8 +2275,8 @@
   acl: acl.allow not enabled
   acl: acl.deny not enabled
   error: pretxnchangegroup.acl hook failed: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
-  bundle2-input-part: total payload size 2068
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
@@ -2324,11 +2324,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2355,9 +2355,9 @@
   acl: path access granted: "911600dab2ae"
   acl: branch access granted: "e8fc755d4d82" on branch "foobar"
   acl: path access granted: "e8fc755d4d82"
-  bundle2-input-part: total payload size 2068
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
   invalid branch cache (served.hidden): tip differs
@@ -2409,11 +2409,11 @@
   bundle2-output-part: "phase-heads" 48 bytes payload
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "replycaps" supported
-  bundle2-input-part: total payload size 207
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:phases" supported
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
-  bundle2-input-part: total payload size 40
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -2433,8 +2433,8 @@
   acl: acl.allow not enabled
   acl: acl.deny not enabled
   error: pretxnchangegroup.acl hook failed: acl: user "george" denied on branch "default" (changeset "ef1ea85a6374")
-  bundle2-input-part: total payload size 2068
-  bundle2-input-part: total payload size 48
+  bundle2-input-part: total payload size * (glob)
+  bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   transaction abort!
   rollback completed
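
The hunks above replace exact byte counts with `* (glob)` because bundle2
payload sizes now vary from run to run. In the .t test format, a trailing
`(glob)` makes `*` on an expected output line match any run of characters
(and `?` match a single character). A sketch, with a hypothetical source
path, of an expectation that passes regardless of the actual size:

  $ hg pull ../source --debug | grep 'total payload size'
  bundle2-input-part: total payload size * (glob)
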
--- a/tests/test-alias.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-alias.t	Thu Mar 02 22:45:44 2023 +0100
@@ -119,6 +119,7 @@
       --close-branch        mark a branch head as closed
       --amend               amend the parent of the working directory
    -s --secret              use the secret phase for committing
+      --draft               use the draft phase for committing
    -e --edit                invoke editor on commit messages
    -i --interactive         use interactive mode
    -I --include PATTERN [+] include names matching the given patterns
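
The help output above documents a new `--draft` option on `hg commit`, the
counterpart of `-s/--secret`. A minimal sketch, assuming it forces the draft
phase the same way `--secret` forces secret (revision number hypothetical):

  $ hg commit --draft -m 'commit in draft phase'
  $ hg phase -r .
  0: draft
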
--- a/tests/test-amend-subrepo.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-amend-subrepo.t	Thu Mar 02 22:45:44 2023 +0100
@@ -190,6 +190,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 5 changesets with 12 changes to 4 files
   checking subrepo links
   subrepo 't' not found in revision 04aa62396ec6
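
A recurring theme in the hunks that follow: `hg verify` now reports a
`checking dirstate` step, and many tests switch to `hg verify -q`, which
prints nothing on success. A sketch of both forms on a healthy repository
(counts hypothetical):

  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checking dirstate
  checked 1 changesets with 1 changes to 1 files
  $ hg verify -q
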
--- a/tests/test-amend.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-amend.t	Thu Mar 02 22:45:44 2023 +0100
@@ -560,6 +560,12 @@
   close=1
   phase=secret
 
+`hg amend --draft` sets phase to draft
+
+  $ hg amend --draft -m declassified
+  $ hg log --limit 1 -T 'phase={phase}\n'
+  phase=draft
+
   $ cd ..
 
 Corner case of amend from issue6157:
--- a/tests/test-bad-extension.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-bad-extension.t	Thu Mar 02 22:45:44 2023 +0100
@@ -53,7 +53,7 @@
 
   $ hg -q help help 2>&1 |grep extension
   *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
-  *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+  *** failed to import extension "badext2": No module named 'badext2'
 
 show traceback
 
@@ -61,15 +61,15 @@
   *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
   Traceback (most recent call last):
   Exception: bit bucket overflow
-  *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+  *** failed to import extension "badext2": No module named 'badext2'
   Traceback (most recent call last):
-  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ImportError: No module named 'hgext.badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
+  Traceback (most recent call last):
+  ImportError: No module named 'hgext3rd.badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'badext2' (py3 no-py36 !)
+  Traceback (most recent call last):
+  ImportError: No module named 'badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'badext2' (py36 !)
 
 names of extensions that failed to load can be accessed via extensions.notloaded()
@@ -111,25 +111,25 @@
   YYYY/MM/DD HH:MM:SS (PID)>   - loading extension: badext2
   YYYY/MM/DD HH:MM:SS (PID)>     - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
   Traceback (most recent call last):
-  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ImportError: No module named 'hgext.badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
   YYYY/MM/DD HH:MM:SS (PID)>     - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
   Traceback (most recent call last):
-  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ImportError: No module named 'hgext.badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
+  Traceback (most recent call last):
+  ImportError: No module named 'hgext3rd.badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
-  *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+  *** failed to import extension "badext2": No module named 'badext2'
   Traceback (most recent call last):
-  ImportError: No module named 'hgext.badext2' (py3 no-py36 !)
+  ImportError: No module named 'hgext.badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext.badext2' (py36 !)
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'hgext3rd.badext2' (py3 no-py36 !)
+  Traceback (most recent call last):
+  ImportError: No module named 'hgext3rd.badext2' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext3rd.badext2' (py36 !)
-  Traceback (most recent call last): (py3 !)
+  Traceback (most recent call last):
   ModuleNotFoundError: No module named 'badext2' (py36 !)
-  ImportError: No module named 'badext2' (py3 no-py36 !)
+  ImportError: No module named 'badext2' (no-py36 !)
   YYYY/MM/DD HH:MM:SS (PID)> > loaded 2 extensions, total time * (glob)
   YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
   YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
@@ -157,7 +157,7 @@
 
   $ hg help --keyword baddocext
   *** failed to import extension "badext" from $TESTTMP/badext.py: bit bucket overflow
-  *** failed to import extension "badext2": No module named 'badext2' (py3 !)
+  *** failed to import extension "badext2": No module named 'badext2'
   Topics:
   
    extensions Using Additional Features
--- a/tests/test-basic.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-basic.t	Thu Mar 02 22:45:44 2023 +0100
@@ -121,6 +121,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 1 changesets with 1 changes to 1 files
 
 Repository root:
--- a/tests/test-bookmarks.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-bookmarks.t	Thu Mar 02 22:45:44 2023 +0100
@@ -575,8 +575,9 @@
 
   $ echo foo > f1
   $ hg bookmark tmp-rollback
-  $ hg ci -Amr
+  $ hg add .
   adding f1
+  $ hg ci -mr
   $ hg bookmarks
      X2                        1:925d80f479bb
      Y                         2:db815d6d32e6
@@ -1125,8 +1126,6 @@
   $ hg add a
   $ hg commit -m '#0'
   $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" bookmarks INVISIBLE
-  transaction abort!
-  rollback completed
   abort: pretxnclose hook exited with status 1
   [40]
   $ cp .hg/bookmarks.pending.saved .hg/bookmarks.pending
@@ -1158,8 +1157,6 @@
      x  y                      2:db815d6d32e6
   @unrelated
   no bookmarks set
-  transaction abort!
-  rollback completed
   abort: pretxnclose hook exited with status 1
   [40]
 
@@ -1242,8 +1239,6 @@
 attempt to create on a default changeset
 
   $ hg bookmark -r 81dcce76aa0b NEW
-  transaction abort!
-  rollback completed
   abort: pretxnclose-bookmark.force-public hook exited with status 1
   [40]
 
@@ -1254,7 +1249,5 @@
 move to the other branch
 
   $ hg bookmark -f -r 125c9a1d6df6 NEW
-  transaction abort!
-  rollback completed
   abort: pretxnclose-bookmark.force-forward hook exited with status 1
   [40]
--- a/tests/test-bundle-phases.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-bundle-phases.t	Thu Mar 02 22:45:44 2023 +0100
@@ -33,7 +33,6 @@
   |
   o  A public
   
-Phases are restored when unbundling
   $ hg bundle --base B -r E bundle
   3 changesets found
   $ hg debugbundle bundle
@@ -46,6 +45,57 @@
   phase-heads -- {} (mandatory: True)
       26805aba1e600a82e93661149f2313866a221a7b draft
   $ hg strip --no-backup C
+
+Phases show on incoming, and are also restored when pulling. Secret commits
+are neither shown as incoming nor pulled, per the usual incoming/pull semantics.
+
+  $ hg log -R bundle -r 'bundle()^+bundle()' -G -T '{desc} {phase}\n'
+  o  E secret
+  |
+  o  D secret
+  |
+  o  C draft
+  |
+  o  B draft
+  |
+  ~
+
+  $ hg incoming bundle -G -T '{desc} {phase}\n'
+  comparing with bundle
+  searching for changes
+  o  C draft
+  
+  $ hg pull bundle
+  pulling from bundle
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  new changesets 26805aba1e60 (1 drafts)
+  (run 'hg update' to get a working copy)
+  $ hg log -G -T '{desc} {phase}\n'
+  o  C draft
+  |
+  o  B draft
+  |
+  o  A public
+  
+  $ hg log -R bundle -r 'bundle()^+bundle()' -G -T '{desc} {phase}\n'
+  o  E secret
+  |
+  o  D secret
+  |
+  o  C draft
+  |
+  o  B draft
+  |
+  ~
+
+  $ hg rollback --config ui.rollback=1
+  repository tip rolled back to revision 1 (undo pull)
+
+Phases are restored when unbundling
   $ hg unbundle -q bundle
   $ rm bundle
   $ hg log -G -T '{desc} {phase}\n'
@@ -64,7 +114,27 @@
   5 changesets found
   $ hg strip --no-backup A
   $ hg unbundle -q bundle
-  $ rm bundle
+  $ hg log -G -T '{desc} {phase}\n'
+  o  E secret
+  |
+  o  D secret
+  |
+  o  C draft
+  |
+  o  B draft
+  |
+  o  A public
+  
+  $ hg init empty
+  $ hg -R empty pull bundle
+  pulling from bundle
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 3 changesets with 3 changes to 3 files
+  new changesets 426bada5c675:26805aba1e60 (2 drafts)
+  (run 'hg update' to get a working copy)
   $ hg log -G -T '{desc} {phase}\n'
   o  E secret
   |
@@ -76,8 +146,74 @@
   |
   o  A public
   
+
+Public repo commits take precedence over phases in the bundle
+  $ hg phase --public E
+  $ hg incoming bundle -G -T '{desc} {phase}\n'
+  comparing with bundle
+  searching for changes
+  no changes found
+  $ hg log -R bundle -r 'bundle()^+bundle()' -G -T '{desc} {phase}\n'
+  o  E public
+  |
+  o  D public
+  |
+  o  C public
+  |
+  o  B public
+  |
+  o  A public
+  
+  $ hg pull bundle
+  pulling from bundle
+  searching for changes
+  no changes found
+  $ hg log -G -T '{desc} {phase}\n'
+  o  E public
+  |
+  o  D public
+  |
+  o  C public
+  |
+  o  B public
+  |
+  o  A public
+  
+  $ rm bundle
+
+A bundle with public phases that are not public in the repo shows them as
+public with `hg log`, but they remain non-public in the plain repo.
+
+  $ hg bundle --base B -r E bundle
+  3 changesets found
+  $ hg phase --force --draft -r C
+
+  $ hg log -R bundle -G -T '{desc} {phase}\n'
+  o  E public
+  |
+  o  D public
+  |
+  o  C public
+  |
+  o  B public
+  |
+  o  A public
+  
+  $ hg log -G -T '{desc} {phase}\n'
+  o  E draft
+  |
+  o  D draft
+  |
+  o  C draft
+  |
+  o  B public
+  |
+  o  A public
+  
+  $ hg phase --public -r E
+  $ rm bundle
+
 Completely public history can be restored
-  $ hg phase --public E
   $ hg bundle -a bundle
   5 changesets found
   $ hg strip --no-backup A
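
The new tests above pin down the semantics: a bundle file acts like any other
peer for `hg incoming` and `hg pull`, so draft heads transfer while secret
changesets stay behind, and phases already public in the local repo are not
demoted by the bundle. A condensed sketch of the same flow (bundle file name
hypothetical):

  $ hg bundle --base B -r E changes.hg
  $ hg incoming changes.hg -T '{desc} {phase}\n'
  $ hg pull changes.hg
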
--- a/tests/test-bundle-r.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-bundle-r.t	Thu Mar 02 22:45:44 2023 +0100
@@ -17,7 +17,7 @@
   >    hg -R test bundle -r "$i" test-"$i".hg test-"$i"
   >    cd test-"$i"
   >    hg unbundle ../test-"$i".hg
-  >    hg verify
+  >    hg verify -q
   >    hg tip -q
   >    cd ..
   > done
@@ -29,11 +29,6 @@
   added 1 changesets with 1 changes to 1 files
   new changesets bfaf4b5cbf01 (1 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
   0:bfaf4b5cbf01
   searching for changes
   2 changesets found
@@ -43,11 +38,6 @@
   added 2 changesets with 2 changes to 1 files
   new changesets bfaf4b5cbf01:21f32785131f (2 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   1:21f32785131f
   searching for changes
   3 changesets found
@@ -57,11 +47,6 @@
   added 3 changesets with 3 changes to 1 files
   new changesets bfaf4b5cbf01:4ce51a113780 (3 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   2:4ce51a113780
   searching for changes
   4 changesets found
@@ -71,11 +56,6 @@
   added 4 changesets with 4 changes to 1 files
   new changesets bfaf4b5cbf01:93ee6ab32777 (4 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 4 changes to 1 files
   3:93ee6ab32777
   searching for changes
   2 changesets found
@@ -85,11 +65,6 @@
   added 2 changesets with 2 changes to 1 files
   new changesets bfaf4b5cbf01:c70afb1ee985 (2 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   1:c70afb1ee985
   searching for changes
   3 changesets found
@@ -99,11 +74,6 @@
   added 3 changesets with 3 changes to 1 files
   new changesets bfaf4b5cbf01:f03ae5a9b979 (3 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   2:f03ae5a9b979
   searching for changes
   4 changesets found
@@ -113,11 +83,6 @@
   added 4 changesets with 5 changes to 2 files
   new changesets bfaf4b5cbf01:095cb14b1b4d (4 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 5 changes to 2 files
   3:095cb14b1b4d
   searching for changes
   5 changesets found
@@ -127,11 +92,6 @@
   added 5 changesets with 6 changes to 3 files
   new changesets bfaf4b5cbf01:faa2e4234c7a (5 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 6 changes to 3 files
   4:faa2e4234c7a
   searching for changes
   5 changesets found
@@ -141,11 +101,6 @@
   added 5 changesets with 5 changes to 2 files
   new changesets bfaf4b5cbf01:916f1afdef90 (5 drafts)
   (run 'hg update' to get a working copy)
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
   4:916f1afdef90
   $ cd test-8
   $ hg pull ../test-7
@@ -158,12 +113,7 @@
   new changesets c70afb1ee985:faa2e4234c7a
   1 local changesets published
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
   $ hg rollback
   repository tip rolled back to revision 4 (undo pull)
   $ cd ..
@@ -243,12 +193,7 @@
 
   $ hg tip -q
   8:916f1afdef90
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
   $ hg rollback
   repository tip rolled back to revision 2 (undo unbundle)
 
@@ -268,12 +213,7 @@
 
   $ hg tip -q
   4:916f1afdef90
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
+  $ hg verify -q
   $ hg rollback
   repository tip rolled back to revision 2 (undo unbundle)
   $ hg unbundle ../test-bundle-branch2.hg
@@ -288,12 +228,7 @@
 
   $ hg tip -q
   6:faa2e4234c7a
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 7 changesets with 6 changes to 3 files
+  $ hg verify -q
   $ hg rollback
   repository tip rolled back to revision 2 (undo unbundle)
   $ hg unbundle ../test-bundle-cset-7.hg
@@ -308,12 +243,7 @@
 
   $ hg tip -q
   4:916f1afdef90
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
+  $ hg verify -q
 
   $ cd ../test
   $ hg merge 7
@@ -342,11 +272,6 @@
 
   $ hg tip -q
   9:03fc0b0e347c
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 10 changesets with 7 changes to 4 files
+  $ hg verify -q
 
   $ cd ..
--- a/tests/test-bundle.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-bundle.t	Thu Mar 02 22:45:44 2023 +0100
@@ -28,12 +28,7 @@
   1 files updated, 0 files merged, 2 files removed, 0 files unresolved
   $ hg mv afile anotherfile
   $ hg commit -m "0.3m"
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
   $ cd ..
   $ hg init empty
 
@@ -70,12 +65,7 @@
 
   $ hg -R empty heads
   [1]
-  $ hg -R empty verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 0 changesets with 0 changes to 0 files
+  $ hg -R empty verify -q
 
 #if repobundlerepo
 
@@ -853,12 +843,7 @@
 
 but regular verify must continue to work
 
-  $ hg -R orig verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
+  $ hg -R orig verify -q
 
 #if repobundlerepo
 diff against bundle
@@ -939,12 +924,7 @@
 
   $ hg clone -q -r0 . part2
   $ hg -q -R part2 pull bundle.hg
-  $ hg -R part2 verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 5 changes to 4 files
+  $ hg -R part2 verify -q
 #endif
 
 == Test bundling no commits
@@ -1039,6 +1019,24 @@
   $ hg bundle -a --config devel.bundle.delta=full ./full.hg
   3 changesets found
 
+
+Test the debug statistics when building a bundle
+------------------------------------------------
+
+  $ hg bundle -a ./default.hg --config debug.bundling-stats=yes
+  3 changesets found
+  DEBUG-BUNDLING: revisions:                9
+  DEBUG-BUNDLING:   changelog:              3
+  DEBUG-BUNDLING:   manifest:               3
+  DEBUG-BUNDLING:   files:                  3 (for 3 revlogs)
+  DEBUG-BUNDLING: deltas:
+  DEBUG-BUNDLING:   from-storage:           2 (100% of available 2)
+  DEBUG-BUNDLING:   computed:               7
+  DEBUG-BUNDLING:     full:                 7 (100% of native 7)
+  DEBUG-BUNDLING:       changelog:          3 (100% of native 3)
+  DEBUG-BUNDLING:       manifests:          1 (100% of native 1)
+  DEBUG-BUNDLING:       files:              3 (100% of native 3)
+
 Test the debug output when applying delta
 -----------------------------------------
 
@@ -1048,18 +1046,62 @@
   > --config storage.revlog.reuse-external-delta=no \
   > --config storage.revlog.reuse-external-delta-parent=no
   adding changesets
-  DBG-DELTAS: CHANGELOG:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: CHANGELOG:   rev=1: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: CHANGELOG:   rev=2: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: CHANGELOG:   rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: CHANGELOG:   rev=1: delta-base=1 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: CHANGELOG:   rev=2: delta-base=2 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
   adding manifests
-  DBG-DELTAS: MANIFESTLOG: rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: MANIFESTLOG: rev=1: search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: MANIFESTLOG: rev=2: search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: MANIFESTLOG: rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: MANIFESTLOG: rev=1: delta-base=0 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: MANIFESTLOG: rev=2: delta-base=1 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
   adding file changes
-  DBG-DELTAS: FILELOG:a:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: FILELOG:b:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: FILELOG:c:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:a:   rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:b:   rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:c:   rev=0: delta-base=0 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
   added 3 changesets with 3 changes to 3 files
   new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
   (run 'hg update' to get a working copy)
 
+
+Test the debug statistics when applying a bundle
+------------------------------------------------
+
+  $ hg init bar
+  $ hg -R bar unbundle ./default.hg  --config debug.unbundling-stats=yes
+  adding changesets
+  adding manifests
+  adding file changes
+  DEBUG-UNBUNDLING: revisions:                9
+  DEBUG-UNBUNDLING:   changelog:              3             ( 33%)
+  DEBUG-UNBUNDLING:   manifests:              3             ( 33%)
+  DEBUG-UNBUNDLING:   files:                  3             ( 33%)
+  DEBUG-UNBUNDLING: total-time:      ?????????????? seconds (glob)
+  DEBUG-UNBUNDLING:   changelog:     ?????????????? seconds (???%) (glob)
+  DEBUG-UNBUNDLING:   manifests:     ?????????????? seconds (???%) (glob)
+  DEBUG-UNBUNDLING:   files:         ?????????????? seconds (???%) (glob)
+  DEBUG-UNBUNDLING: type-count:
+  DEBUG-UNBUNDLING:   changelog:
+  DEBUG-UNBUNDLING:     full:                 3
+  DEBUG-UNBUNDLING:       cached:             3             (100%)
+  DEBUG-UNBUNDLING:   manifests:
+  DEBUG-UNBUNDLING:     full:                 1
+  DEBUG-UNBUNDLING:       cached:             1             (100%)
+  DEBUG-UNBUNDLING:     delta:                2
+  DEBUG-UNBUNDLING:       cached:             2             (100%)
+  DEBUG-UNBUNDLING:   files:
+  DEBUG-UNBUNDLING:     full:                 3
+  DEBUG-UNBUNDLING:       cached:             3             (100%)
+  DEBUG-UNBUNDLING: type-time:
+  DEBUG-UNBUNDLING:   changelog:
+  DEBUG-UNBUNDLING:     full:        ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:   manifests:
+  DEBUG-UNBUNDLING:     full:        ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:     delta:       ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:   files:
+  DEBUG-UNBUNDLING:     full:        ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  added 3 changesets with 3 changes to 3 files
+  new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
+  (run 'hg update' to get a working copy)
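
The two blocks above come from the new `debug.bundling-stats` and
`debug.unbundling-stats` config knobs, which print a revision and delta
breakdown while writing or applying a bundle. A minimal sketch of invoking
both (file and repository names hypothetical):

  $ hg bundle -a out.hg --config debug.bundling-stats=yes
  $ hg init dst
  $ hg -R dst unbundle out.hg --config debug.unbundling-stats=yes
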
--- a/tests/test-bundle2-exchange.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-bundle2-exchange.t	Thu Mar 02 22:45:44 2023 +0100
@@ -739,12 +739,10 @@
   $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
   pushing to ssh://user@dummy/other
   searching for changes
-  remote: Fail early! (no-py3 chg !)
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: Fail early! (py3 !)
-  remote: Fail early! (no-py3 no-chg !)
+  remote: Fail early!
   remote: transaction abort!
   remote: Cleaning up the mess...
   remote: rollback completed
--- a/tests/test-censor.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-censor.t	Thu Mar 02 22:45:44 2023 +0100
@@ -175,6 +175,7 @@
   checking files
    target@1: censored file data
    target@2: censored file data
+  not checking dirstate because of previous errors
   checked 5 changesets with 7 changes to 2 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 1)
@@ -205,12 +206,7 @@
 
 Repo passes verification with warnings with explicit config
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 7 changes to 2 files
+  $ hg verify -q
 
 May update to revision with censored data with explicit config
 
@@ -330,24 +326,14 @@
   $ hg cat -r $C1 target | head -n 10
   $ hg cat -r 0 target | head -n 10
   Initially untainted file
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 12 changesets with 13 changes to 2 files
+  $ hg verify -q
 
 Repo cloned before tainted content introduced can pull censored nodes
 
   $ cd ../rpull
   $ hg cat -r tip target | head -n 10
   Initially untainted file
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 2 changes to 2 files
+  $ hg verify -q
   $ hg pull -r $H1 -r $H2
   pulling from $TESTTMP/r
   searching for changes
@@ -369,12 +355,7 @@
   $ hg cat -r $C1 target | head -n 10
   $ hg cat -r 0 target | head -n 10
   Initially untainted file
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 12 changesets with 13 changes to 2 files
+  $ hg verify -q
 
 Censored nodes can be pushed if they censor previously unexchanged nodes
 
@@ -429,12 +410,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cat target | head -n 10
   Re-sanitized; nothing to see here
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 14 changesets with 15 changes to 2 files
+  $ hg verify -q
 
 Grepping only warns, doesn't error out
 
@@ -488,12 +464,7 @@
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cat target | head -n 10
   Re-sanitized; nothing to see here
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 14 changesets with 15 changes to 2 files
+  $ hg verify -q
   $ cd ../r
 
 Can import bundle where first revision of a file is censored
--- a/tests/test-clone-pull-corruption.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-clone-pull-corruption.t	Thu Mar 02 22:45:44 2023 +0100
@@ -43,11 +43,6 @@
 see what happened
 
   $ wait
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
+  $ hg verify -q
 
   $ cd ..
--- a/tests/test-clone-r.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-clone-r.t	Thu Mar 02 22:45:44 2023 +0100
@@ -66,12 +66,7 @@
        5       7 09bb521d218d de68e904d169 000000000000
        6       8 1fde233dfb0f f54c32f13478 000000000000
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
 
   $ cd ..
 
@@ -80,7 +75,7 @@
   >   echo ---- hg clone -r "$i" test test-"$i"
   >   hg clone -r "$i" test test-"$i"
   >   cd test-"$i"
-  >   hg verify
+  >   hg verify -q
   >   cd ..
   > done
   
@@ -92,11 +87,6 @@
   new changesets f9ee2f85a263
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
   
   ---- hg clone -r 1 test test-1
   adding changesets
@@ -106,11 +96,6 @@
   new changesets f9ee2f85a263:34c2bf6b0626
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   
   ---- hg clone -r 2 test test-2
   adding changesets
@@ -120,11 +105,6 @@
   new changesets f9ee2f85a263:e38ba6f5b7e0
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   
   ---- hg clone -r 3 test test-3
   adding changesets
@@ -134,11 +114,6 @@
   new changesets f9ee2f85a263:eebf5a27f8ca
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 4 changes to 1 files
   
   ---- hg clone -r 4 test test-4
   adding changesets
@@ -148,11 +123,6 @@
   new changesets f9ee2f85a263:095197eb4973
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   
   ---- hg clone -r 5 test test-5
   adding changesets
@@ -162,11 +132,6 @@
   new changesets f9ee2f85a263:1bb50a9436a7
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   
   ---- hg clone -r 6 test test-6
   adding changesets
@@ -176,11 +141,6 @@
   new changesets f9ee2f85a263:7373c1169842
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 5 changes to 2 files
   
   ---- hg clone -r 7 test test-7
   adding changesets
@@ -190,11 +150,6 @@
   new changesets f9ee2f85a263:a6a34bfa0076
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 6 changes to 3 files
   
   ---- hg clone -r 8 test test-8
   adding changesets
@@ -204,11 +159,6 @@
   new changesets f9ee2f85a263:aa35859c02ea
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
 
   $ cd test-8
   $ hg pull ../test-7
@@ -220,12 +170,7 @@
   added 4 changesets with 2 changes to 3 files (+1 heads)
   new changesets 095197eb4973:a6a34bfa0076
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
   $ cd ..
 
   $ hg clone test test-9
--- a/tests/test-clone-stream-format.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-clone-stream-format.t	Thu Mar 02 22:45:44 2023 +0100
@@ -110,12 +110,7 @@
   new changesets 96ee1d7354c4:06ddac466af5
   updating to branch default
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R server-no-store
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg verify -R server-no-store -q
   $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
   $ cat hg-1.pid > $DAEMON_PIDS
   $ hg -R server-no-store serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -129,12 +124,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-store --config format.usestore=no
   $ cat errors-1.txt
-  $ hg -R clone-remove-store verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-remove-store verify -q
   $ hg debugrequires -R clone-remove-store | grep store
   [1]
 
@@ -143,12 +133,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-store --config format.usestore=yes
   $ cat errors-2.txt
-  $ hg -R clone-add-store verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-add-store verify -q
   $ hg debugrequires -R clone-add-store | grep store
   store
 
@@ -171,12 +156,7 @@
   new changesets 96ee1d7354c4:06ddac466af5
   updating to branch default
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R server-no-fncache
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg verify -R server-no-fncache -q
   $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
   $ cat hg-1.pid > $DAEMON_PIDS
   $ hg -R server-no-fncache serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -190,12 +170,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-fncache --config format.usefncache=no
   $ cat errors-1.txt
-  $ hg -R clone-remove-fncache verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-remove-fncache verify -q
   $ hg debugrequires -R clone-remove-fncache | grep fncache
   [1]
 
@@ -204,12 +179,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-fncache --config format.usefncache=yes
   $ cat errors-2.txt
-  $ hg -R clone-add-fncache verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-add-fncache verify -q
   $ hg debugrequires -R clone-add-fncache | grep fncache
   fncache
 
@@ -231,12 +201,7 @@
   new changesets 96ee1d7354c4:06ddac466af5
   updating to branch default
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R server-no-dotencode
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg verify -R server-no-dotencode -q
   $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
   $ cat hg-1.pid > $DAEMON_PIDS
   $ hg -R server-no-dotencode serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -250,12 +215,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-dotencode --config format.dotencode=no
   $ cat errors-1.txt
-  $ hg -R clone-remove-dotencode verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-remove-dotencode verify -q
   $ hg debugrequires -R clone-remove-dotencode | grep dotencode
   [1]
 
@@ -264,12 +224,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-dotencode --config format.dotencode=yes
   $ cat errors-2.txt
-  $ hg -R clone-add-dotencode verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-add-dotencode verify -q
   $ hg debugrequires -R clone-add-dotencode | grep dotencode
   dotencode
 
@@ -289,12 +244,7 @@
   $ cat hg-1.pid > $DAEMON_PIDS
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-from-share
-  $ hg -R clone-from-share verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-from-share verify -q
   $ hg debugrequires -R clone-from-share | egrep 'share$'
   [1]
 
@@ -313,12 +263,7 @@
   new changesets 96ee1d7354c4:06ddac466af5
   updating to branch default
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R server-no-share-safe
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg verify -R server-no-share-safe -q
   $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
   $ cat hg-1.pid > $DAEMON_PIDS
   $ hg -R server-no-share-safe serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -332,12 +277,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-share-safe --config format.use-share-safe=no
   $ cat errors-1.txt
-  $ hg -R clone-remove-share-safe verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-remove-share-safe verify -q
   $ hg debugrequires -R clone-remove-share-safe | grep share-safe
   [1]
 
@@ -346,12 +286,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-share-safe --config format.use-share-safe=yes
   $ cat errors-2.txt
-  $ hg -R clone-add-share-safe verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-add-share-safe verify -q
   $ hg debugrequires -R clone-add-share-safe | grep share-safe
   share-safe
 
@@ -374,12 +309,7 @@
   new changesets 96ee1d7354c4:06ddac466af5
   updating to branch default
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R server-no-persistent-nodemap
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg verify -R server-no-persistent-nodemap -q
   $ hg -R server serve -p $HGPORT -d --pid-file=hg-1.pid --error errors-1.txt
   $ cat hg-1.pid > $DAEMON_PIDS
   $ hg -R server-no-persistent-nodemap serve -p $HGPORT2 -d --pid-file=hg-2.pid --error errors-2.txt
@@ -401,12 +331,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT clone-remove-persistent-nodemap --config format.use-persistent-nodemap=no
   $ cat errors-1.txt
-  $ hg -R clone-remove-persistent-nodemap verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-remove-persistent-nodemap verify -q
   $ hg debugrequires -R clone-remove-persistent-nodemap | grep persistent-nodemap
   [1]
 
@@ -421,12 +346,7 @@
 
   $ hg clone --quiet --stream -U http://localhost:$HGPORT2 clone-add-persistent-nodemap --config format.use-persistent-nodemap=yes
   $ cat errors-2.txt
-  $ hg -R clone-add-persistent-nodemap verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5004 changesets with 1088 changes to 1088 files
+  $ hg -R clone-add-persistent-nodemap verify -q
   $ hg debugrequires -R clone-add-persistent-nodemap | grep persistent-nodemap
   persistent-nodemap
 
--- a/tests/test-clone-stream.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-clone-stream.t	Thu Mar 02 22:45:44 2023 +0100
@@ -94,12 +94,7 @@
 
 Check that the clone went well
 
-  $ hg verify -R local-clone
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 1088 changes to 1088 files
+  $ hg verify -R local-clone -q
 
 Check uncompressed
 ==================
@@ -651,12 +646,7 @@
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
-  $ hg verify -R with-bookmarks
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 1088 changes to 1088 files
+  $ hg verify -R with-bookmarks -q
   $ hg -R with-bookmarks bookmarks
      some-bookmark             2:5223b5e3265f
 
@@ -692,12 +682,7 @@
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
-  $ hg verify -R phase-publish
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 1088 changes to 1088 files
+  $ hg verify -R phase-publish -q
   $ hg -R phase-publish phase -r 'all()'
   0: public
   1: public
@@ -747,12 +732,7 @@
   1: draft
   2: draft
 #endif
-  $ hg verify -R phase-no-publish
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 1088 changes to 1088 files
+  $ hg verify -R phase-no-publish -q
 
   $ killdaemons.py
 
@@ -801,12 +781,7 @@
   0: draft
   $ hg debugobsolete -R with-obsolescence
   8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
-  $ hg verify -R with-obsolescence
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 1089 changes to 1088 files
+  $ hg verify -R with-obsolescence -q
 
   $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
   streaming all changes
--- a/tests/test-clone.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-clone.t	Thu Mar 02 22:45:44 2023 +0100
@@ -59,12 +59,7 @@
 
   $ cat a
   a
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 11 changes to 2 files
+  $ hg verify -q
 
 Invalid dest '' must abort:
 
@@ -122,12 +117,7 @@
 
   $ cat a 2>/dev/null || echo "a not present"
   a not present
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 11 changes to 2 files
+  $ hg verify -q
 
 Default destination:
 
@@ -167,12 +157,7 @@
   new changesets acb14030fe0a:a7949464abda
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R g verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 11 changes to 2 files
+  $ hg -R g verify -q
 
 Invalid dest '' with --pull must abort (issue2528):
 
--- a/tests/test-commandserver.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-commandserver.t	Thu Mar 02 22:45:44 2023 +0100
@@ -541,6 +541,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 2 changesets with 2 changes to 1 files
   $ hg revert --no-backup -aq
 
@@ -825,6 +826,7 @@
   message: '\xa6Ditem@Cpos\xf6EtopicMcrosscheckingEtotal\xf6DtypeHprogressDunit@'
   message: '\xa2DdataOchecking files\nDtypeFstatus'
   message: '\xa6Ditem@Cpos\xf6EtopicHcheckingEtotal\xf6DtypeHprogressDunit@'
+  message: '\xa2DdataRchecking dirstate\nDtypeFstatus'
   message: '\xa2DdataX/checked 0 changesets with 0 changes to 0 files\nDtypeFstatus'
 
   >>> from hgclient import checkwith, readchannel, runcommand, stringio
--- a/tests/test-commit-amend.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-commit-amend.t	Thu Mar 02 22:45:44 2023 +0100
@@ -123,13 +123,13 @@
   uncompressed size of bundle content:
        254 (changelog)
        163 (manifests)
-       131  a
+       133  a
   saved backup bundle to $TESTTMP/repo/.hg/strip-backup/47343646fa3d-c2758885-amend.hg
   1 changesets found
   uncompressed size of bundle content:
        250 (changelog)
        163 (manifests)
-       131  a
+       133  a
   adding branch
   adding changesets
   adding manifests
@@ -267,13 +267,13 @@
   uncompressed size of bundle content:
        249 (changelog)
        163 (manifests)
-       133  a
+       135  a
   saved backup bundle to $TESTTMP/repo/.hg/strip-backup/a9a13940fc03-7c2e8674-amend.hg
   1 changesets found
   uncompressed size of bundle content:
        257 (changelog)
        163 (manifests)
-       133  a
+       135  a
   adding branch
   adding changesets
   adding manifests
@@ -303,13 +303,13 @@
   uncompressed size of bundle content:
        257 (changelog)
        163 (manifests)
-       133  a
+       135  a
   saved backup bundle to $TESTTMP/repo/.hg/strip-backup/64a124ba1b44-10374b8f-amend.hg
   1 changesets found
   uncompressed size of bundle content:
        257 (changelog)
        163 (manifests)
-       135  a
+       137  a
   adding branch
   adding changesets
   adding manifests
--- a/tests/test-completion.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-completion.t	Thu Mar 02 22:45:44 2023 +0100
@@ -77,6 +77,7 @@
   debug-delta-find
   debug-repair-issue6528
   debug-revlog-index
+  debug-revlog-stats
   debugancestor
   debugantivirusrunning
   debugapplystreamclonebundle
@@ -264,13 +265,14 @@
   bundle: exact, force, rev, branch, base, all, type, ssh, remotecmd, insecure
   cat: output, rev, decode, include, exclude, template
   clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
-  commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
+  commit: addremove, close-branch, amend, secret, draft, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
   config: untrusted, exp-all-known, edit, local, source, shared, non-shared, global, template
   continue: dry-run
   copy: forget, after, at-rev, force, include, exclude, dry-run
-  debug-delta-find: changelog, manifest, dir, template
+  debug-delta-find: changelog, manifest, dir, template, source
   debug-repair-issue6528: to-report, from-report, paranoid, dry-run
   debug-revlog-index: changelog, manifest, dir, template
+  debug-revlog-stats: changelog, manifest, filelogs, template
   debugancestor: 
   debugantivirusrunning: 
   debugapplystreamclonebundle: 
@@ -326,7 +328,7 @@
   debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
   debugserve: sshstdio, logiofd, logiofile
   debugsetparents: 
-  debugshell: 
+  debugshell: command
   debugsidedata: changelog, manifest, dir
   debugssl: 
   debugstrip: rev, force, no-backup, nobackup, , keep, bookmark, soft
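
The completion list above also picks up a new `command` option on
`hg debugshell`, which runs a one-off statement instead of opening the
interactive prompt. A sketch, assuming the usual `repo` object is in scope
as it is in the interactive shell:

  $ hg debugshell --command 'print(len(repo))'
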
--- a/tests/test-context.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-context.py	Thu Mar 02 22:45:44 2023 +0100
@@ -42,8 +42,10 @@
 os.utime('foo', (1000, 1000))
 
 # add+commit 'foo'
-repo[None].add([b'foo'])
-repo.commit(text=b'commit1', date=b"0 0")
+with repo.wlock(), repo.lock(), repo.transaction(b'test-context'):
+    with repo.dirstate.changing_files(repo):
+        repo[None].add([b'foo'])
+    repo.commit(text=b'commit1', date=b"0 0")
 
 d = repo[None][b'foo'].date()
 if os.name == 'nt':
@@ -108,16 +110,20 @@
 
 repo.wwrite(b'bar-m', b'bar-m\n', b'')
 repo.wwrite(b'bar-r', b'bar-r\n', b'')
-repo[None].add([b'bar-m', b'bar-r'])
-repo.commit(text=b'add bar-m, bar-r', date=b"0 0")
+with repo.wlock(), repo.lock(), repo.transaction(b'test-context'):
+    with repo.dirstate.changing_files(repo):
+        repo[None].add([b'bar-m', b'bar-r'])
+    repo.commit(text=b'add bar-m, bar-r', date=b"0 0")
 
 # ancestor "wcctx ~ 1"
 actx1 = repo[b'.']
 
 repo.wwrite(b'bar-m', b'bar-m bar-m\n', b'')
 repo.wwrite(b'bar-a', b'bar-a\n', b'')
-repo[None].add([b'bar-a'])
-repo[None].forget([b'bar-r'])
+with repo.wlock(), repo.lock(), repo.transaction(b'test-context'):
+    with repo.dirstate.changing_files(repo):
+        repo[None].add([b'bar-a'])
+        repo[None].forget([b'bar-r'])
 
 # status at this point:
 #   M bar-m
@@ -237,7 +243,8 @@
 with repo.wlock(), repo.lock(), repo.transaction(b'test'):
     with open(b'4', 'wb') as f:
         f.write(b'4')
-    repo.dirstate.set_tracked(b'4')
+    with repo.dirstate.changing_files(repo):
+        repo.dirstate.set_tracked(b'4')
     repo.commit(b'4')
     revsbefore = len(repo.changelog)
     repo.invalidate(clearfilecache=True)
--- a/tests/test-contrib-dumprevlog.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-contrib-dumprevlog.t	Thu Mar 02 22:45:44 2023 +0100
@@ -14,12 +14,7 @@
 
   $ echo adding more to file a >> a
   $ hg commit -m third
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
+  $ hg verify -q
 
 Dumping revlog of file a to stdout:
   $ "$PYTHON" "$CONTRIBDIR/dumprevlog" .hg/store/data/a.i
@@ -79,12 +74,7 @@
 
 Verify:
 
-  $ hg -R repo-c verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
+  $ hg -R repo-c verify -q
 
 Compare repos:
 
--- a/tests/test-contrib-perf.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-contrib-perf.t	Thu Mar 02 22:45:44 2023 +0100
@@ -307,7 +307,7 @@
   malformatted run limit entry, missing "-": 500
   ! wall * comb * user * sys * (best of 5) (glob)
   $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
-  malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
+  malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
   ! wall * comb * user * sys * (best of 5) (glob)
   $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
   malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
--- a/tests/test-convert-filemap.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-convert-filemap.t	Thu Mar 02 22:45:44 2023 +0100
@@ -292,12 +292,12 @@
   $ rm -rf source/.hg/store/data/dir/file4
 #endif
   $ hg -q convert --filemap renames.fmap --datesort source dummydest
-  abort: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+  abort: dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
   abort: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   [50]
   $ hg -q convert --filemap renames.fmap --datesort --config convert.hg.ignoreerrors=1 source renames.repo
-  ignoring: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
-  ignoring: data/dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
+  ignoring: dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+  ignoring: dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
   ignoring: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   ignoring: data/dir/file4/index@6edd55f559cd: no node (reposimplestore !)
   $ hg up -q -R renames.repo
@@ -312,12 +312,7 @@
   |
   o  0 "0: add foo baz dir/" files: dir2/dir3/file dir2/dir3/subdir/file3 foo2
   
-  $ hg -R renames.repo verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 7 changes to 4 files
+  $ hg -R renames.repo verify -q
 
   $ hg -R renames.repo manifest --debug
   d43feacba7a4f1f2080dde4a4b985bd8a0236d46 644   copied2
--- a/tests/test-convert-hg-source.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-convert-hg-source.t	Thu Mar 02 22:45:44 2023 +0100
@@ -182,18 +182,13 @@
   sorting...
   converting...
   4 init
-  ignoring: data/b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
+  ignoring: b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
   ignoring: data/b/index@1e88685f5dde: no node (reposimplestore !)
   3 changeall
   2 changebagain
   1 merge
   0 moveb
-  $ hg -R fixed verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 3 files
+  $ hg -R fixed verify -q
 
 manifest -r 0
 
--- a/tests/test-copy.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-copy.t	Thu Mar 02 22:45:44 2023 +0100
@@ -96,12 +96,7 @@
   $ hg cat a > asum
   $ md5sum.py asum
   60b725f10c9c85c70d97880dfe8191b3  asum
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
+  $ hg verify -q
 
   $ cd ..
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-debug-revlog-stats.t	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,77 @@
+Force revlog max inline value to be smaller than default
+
+  $ mkdir $TESTTMP/ext
+  $ cat << EOF > $TESTTMP/ext/small_inline.py
+  > from mercurial import revlog
+  > revlog._maxinline = 8
+  > EOF
+
+  $ cat << EOF >> $HGRCPATH
+  > [extensions]
+  > small_inline=$TESTTMP/ext/small_inline.py
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+
+Try on an empty repository
+
+  $ hg debug-revlog-stats
+  rev-count   data-size inl type      target 
+          0           0 yes changelog 
+          0           0 yes manifest  
+
+  $ mkdir folder
+  $ touch a b folder/c folder/d
+  $ hg commit -Aqm 0
+  $ echo "text" > a
+  $ hg rm b
+  $ echo "longer string" > folder/d
+  $ hg commit -Aqm 1
+
+Differences in data size observed with pure are due to different compression
+algorithms
+
+  $ hg debug-revlog-stats
+  rev-count   data-size inl type      target 
+          2         138 no  changelog  (no-pure !)
+          2         137 no  changelog  (pure !)
+          2         177 no  manifest   (no-pure !)
+          2         168 no  manifest   (pure !)
+          2           6 yes file      a
+          1           0 yes file      b
+          1           0 yes file      folder/c
+          2          15 no  file      folder/d
+
+Test 'changelog' command argument
+
+  $ hg debug-revlog-stats -c
+  rev-count   data-size inl type      target 
+          2         138 no  changelog  (no-pure !)
+          2         137 no  changelog  (pure !)
+
+Test 'manifest' command argument
+
+  $ hg debug-revlog-stats -m
+  rev-count   data-size inl type      target 
+          2         177 no  manifest   (no-pure !)
+          2         168 no  manifest   (pure !)
+
+Test 'file' command argument
+
+  $ hg debug-revlog-stats -f
+  rev-count   data-size inl type      target 
+          2           6 yes file      a
+          1           0 yes file      b
+          1           0 yes file      folder/c
+          2          15 no  file      folder/d
+
+Test multiple command arguments
+
+  $ hg debug-revlog-stats -cm
+  rev-count   data-size inl type      target 
+          2         138 no  changelog  (no-pure !)
+          2         137 no  changelog  (pure !)
+          2         177 no  manifest   (no-pure !)
+          2         168 no  manifest   (pure !)
+
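
For reference, a minimal sketch of the "inl" column logic that the
small_inline extension above manipulates: a revlog keeps its data inline with
its index while the stored data stays under revlog._maxinline bytes (lowered
here from the default to 8), which is why only folder/d, with 15 bytes
stored, reports "no" among the files.

    def is_inline(data_size, maxinline=8):
        # with _maxinline = 8: a (6), b (0) and folder/c (0) fit, folder/d (15) does not
        return data_size < maxinline

    for target, size in [('a', 6), ('b', 0), ('folder/c', 0), ('folder/d', 15)]:
        print(target, 'yes' if is_inline(size) else 'no')
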
--- a/tests/test-debugcommands.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-debugcommands.t	Thu Mar 02 22:45:44 2023 +0100
@@ -39,6 +39,9 @@
   chunks size   : 191
       0x75 (u)  : 191 (100.00%)
   
+  
+  total-stored-content: 188 bytes
+  
   avg chain length  :  0
   max chain length  :  0
   max chain reach   : 67
@@ -74,6 +77,9 @@
       empty     :  0 ( 0.00%)
       0x75 (u)  : 88 (100.00%)
   
+  
+  total-stored-content: 86 bytes
+  
   avg chain length  :  0
   max chain length  :  0
   max chain reach   : 44
@@ -107,6 +113,9 @@
   chunks size   : 3
       0x75 (u)  : 3 (100.00%)
   
+  
+  total-stored-content: 2 bytes
+  
   avg chain length  : 0
   max chain length  : 0
   max chain reach   : 3
@@ -212,7 +221,7 @@
    {
     "chainid": 1,
     "chainlen": 1,
-    "chainratio": 1.0232558139534884, (py3 !)
+    "chainratio": 1.0232558139534884,
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -252,7 +261,7 @@
    {
     "chainid": 3,
     "chainlen": 1,
-    "chainratio": 1.0232558139534884, (py3 !)
+    "chainratio": 1.0232558139534884,
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -293,7 +302,7 @@
    {
     "chainid": 1,
     "chainlen": 1,
-    "chainratio": 1.0232558139534884, (py3 !)
+    "chainratio": 1.0232558139534884,
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -333,7 +342,7 @@
    {
     "chainid": 3,
     "chainlen": 1,
-    "chainratio": 1.0232558139534884, (py3 !)
+    "chainratio": 1.0232558139534884,
     "chainsize": 44,
     "compsize": 44,
     "deltatype": "base",
@@ -715,3 +724,8 @@
   pushable: yes
 
 #endif
+
+Test debugshell
+
+  $ hg debugshell -c 'ui.write(b"%s\n" % ui.username())'
+  test
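
The new debugshell test runs an arbitrary Python snippet with the command's
objects preloaded in the namespace. A rough, self-contained sketch of the
idea (the FakeUI stub below is purely illustrative; inside hg the real ui,
and typically repo, objects are provided):

    import sys

    class FakeUI:
        def username(self):
            return b'test'

        def write(self, data):
            sys.stdout.buffer.write(data)

    source = 'ui.write(b"%s\\n" % ui.username())'
    exec(compile(source, '<debugshell>', 'exec'), {'ui': FakeUI()})
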
--- a/tests/test-demandimport.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-demandimport.py	Thu Mar 02 22:45:44 2023 +0100
@@ -8,7 +8,6 @@
 import types
 
 # Don't import pycompat because it has too many side-effects.
-ispy3 = sys.version_info[0] >= 3
 ispy311 = (sys.version_info.major, sys.version_info.minor) >= (3, 11)
 
 # Only run if demandimport is allowed
@@ -25,14 +24,11 @@
 if sys.version_info[0:2] == (3, 5):
     sys.exit(80)
 
-if ispy3:
-    from importlib.util import _LazyModule
+from importlib.util import _LazyModule
 
-    try:
-        from importlib.util import _Module as moduletype
-    except ImportError:
-        moduletype = types.ModuleType
-else:
+try:
+    from importlib.util import _Module as moduletype
+except ImportError:
     moduletype = types.ModuleType
 
 if os.name != 'nt':
@@ -68,10 +64,7 @@
 
 # We use assert instead of a unittest test case because having imports inside
 # functions changes behavior of the demand importer.
-if ispy3:
-    assert not isinstance(node, _LazyModule)
-else:
-    assert f(node) == "<module 'mercurial.node' from '?'>", f(node)
+assert not isinstance(node, _LazyModule)
 
 # now enable it for real
 del os.environ['HGDEMANDIMPORT']
@@ -81,11 +74,8 @@
 assert 'mercurial.error' not in sys.modules
 from mercurial import error as errorproxy
 
-if ispy3:
-    assert isinstance(errorproxy, _LazyModule)
-    assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
-else:
-    assert f(errorproxy) == "<unloaded module 'error'>", f(errorproxy)
+assert isinstance(errorproxy, _LazyModule)
+assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
 
 doc = ' '.join(errorproxy.__doc__.split()[:3])
 assert doc == 'Mercurial exceptions. This', doc
@@ -96,22 +86,16 @@
 name = errorproxy.__dict__['__name__']
 assert name == 'mercurial.error', name
 
-if ispy3:
-    assert not isinstance(errorproxy, _LazyModule)
-    assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
-else:
-    assert f(errorproxy) == "<proxied module 'error'>", f(errorproxy)
+assert not isinstance(errorproxy, _LazyModule)
+assert f(errorproxy) == "<module 'mercurial.error' from '?'>", f(errorproxy)
 
 import os
 
-if ispy3:
-    assert not isinstance(os, _LazyModule)
-    if ispy311:
-        assert f(os) == "<module 'os' (frozen)>", f(os)
-    else:
-        assert f(os) == "<module 'os' from '?'>", f(os)
+assert not isinstance(os, _LazyModule)
+if ispy311:
+    assert f(os) == "<module 'os' (frozen)>", f(os)
 else:
-    assert f(os) == "<unloaded module 'os'>", f(os)
+    assert f(os) == "<module 'os' from '?'>", f(os)
 
 assert f(os.system) == '<built-in function system>', f(os.system)
 if ispy311:
@@ -122,13 +106,10 @@
 assert 'mercurial.utils.procutil' not in sys.modules
 from mercurial.utils import procutil
 
-if ispy3:
-    assert isinstance(procutil, _LazyModule)
-    assert f(procutil) == "<module 'mercurial.utils.procutil' from '?'>", f(
-        procutil
-    )
-else:
-    assert f(procutil) == "<unloaded module 'procutil'>", f(procutil)
+assert isinstance(procutil, _LazyModule)
+assert f(procutil) == "<module 'mercurial.utils.procutil' from '?'>", f(
+    procutil
+)
 
 assert f(procutil.system) == '<function system at 0x?>', f(procutil.system)
 assert procutil.__class__ == moduletype, procutil.__class__
@@ -140,84 +121,51 @@
 assert 'mercurial.hgweb' not in sys.modules
 from mercurial import hgweb
 
-if ispy3:
-    assert isinstance(hgweb, _LazyModule)
-    assert f(hgweb) == "<module 'mercurial.hgweb' from '?'>", f(hgweb)
-    assert isinstance(hgweb.hgweb_mod, _LazyModule)
-    assert (
-        f(hgweb.hgweb_mod) == "<module 'mercurial.hgweb.hgweb_mod' from '?'>"
-    ), f(hgweb.hgweb_mod)
-else:
-    assert f(hgweb) == "<unloaded module 'hgweb'>", f(hgweb)
-    assert f(hgweb.hgweb_mod) == "<unloaded module 'hgweb_mod'>", f(
-        hgweb.hgweb_mod
-    )
+assert isinstance(hgweb, _LazyModule)
+assert f(hgweb) == "<module 'mercurial.hgweb' from '?'>", f(hgweb)
+assert isinstance(hgweb.hgweb_mod, _LazyModule)
+assert f(hgweb.hgweb_mod) == "<module 'mercurial.hgweb.hgweb_mod' from '?'>", f(
+    hgweb.hgweb_mod
+)
 
 assert f(hgweb) == "<module 'mercurial.hgweb' from '?'>", f(hgweb)
 
 import re as fred
 
-if ispy3:
-    assert not isinstance(fred, _LazyModule)
-    assert f(fred) == "<module 're' from '?'>"
-else:
-    assert f(fred) == "<unloaded module 're'>", f(fred)
+assert not isinstance(fred, _LazyModule)
+assert f(fred) == "<module 're' from '?'>"
 
 import re as remod
 
-if ispy3:
-    assert not isinstance(remod, _LazyModule)
-    assert f(remod) == "<module 're' from '?'>"
-else:
-    assert f(remod) == "<unloaded module 're'>", f(remod)
+assert not isinstance(remod, _LazyModule)
+assert f(remod) == "<module 're' from '?'>"
 
 import sys as re
 
-if ispy3:
-    assert not isinstance(re, _LazyModule)
-    assert f(re) == "<module 'sys' (built-in)>"
-else:
-    assert f(re) == "<unloaded module 'sys'>", f(re)
+assert not isinstance(re, _LazyModule)
+assert f(re) == "<module 'sys' (built-in)>"
 
-if ispy3:
-    assert not isinstance(fred, _LazyModule)
-    assert f(fred) == "<module 're' from '?'>", f(fred)
-else:
-    assert f(fred) == "<unloaded module 're'>", f(fred)
+assert not isinstance(fred, _LazyModule)
+assert f(fred) == "<module 're' from '?'>", f(fred)
 
 assert f(fred.sub) == '<function sub at 0x?>', f(fred.sub)
 
-if ispy3:
-    assert not isinstance(fred, _LazyModule)
-    assert f(fred) == "<module 're' from '?'>", f(fred)
-else:
-    assert f(fred) == "<proxied module 're'>", f(fred)
+assert not isinstance(fred, _LazyModule)
+assert f(fred) == "<module 're' from '?'>", f(fred)
 
 remod.escape  # use remod
 assert f(remod) == "<module 're' from '?'>", f(remod)
 
-if ispy3:
-    assert not isinstance(re, _LazyModule)
-    assert f(re) == "<module 'sys' (built-in)>"
-    assert f(type(re.stderr)) == "<class '_io.TextIOWrapper'>", f(
-        type(re.stderr)
-    )
-    assert f(re) == "<module 'sys' (built-in)>"
-else:
-    assert f(re) == "<unloaded module 'sys'>", f(re)
-    assert f(re.stderr) == "<open file '<whatever>', mode 'w' at 0x?>", f(
-        re.stderr
-    )
-    assert f(re) == "<proxied module 'sys'>", f(re)
+assert not isinstance(re, _LazyModule)
+assert f(re) == "<module 'sys' (built-in)>"
+assert f(type(re.stderr)) == "<class '_io.TextIOWrapper'>", f(type(re.stderr))
+assert f(re) == "<module 'sys' (built-in)>"
 
 assert 'telnetlib' not in sys.modules
 import telnetlib
 
-if ispy3:
-    assert isinstance(telnetlib, _LazyModule)
-    assert f(telnetlib) == "<module 'telnetlib' from '?'>"
-else:
-    assert f(telnetlib) == "<unloaded module 'telnetlib'>", f(telnetlib)
+assert isinstance(telnetlib, _LazyModule)
+assert f(telnetlib) == "<module 'telnetlib' from '?'>"
 
 try:
     from telnetlib import unknownattr
@@ -240,3 +188,11 @@
 zipfileimp = __import__('ftplib', globals(), locals(), ['unknownattr'])
 assert f(zipfileimp) == "<module 'ftplib' from '?'>", f(zipfileimp)
 assert not util.safehasattr(zipfileimp, 'unknownattr')
+
+
+# test deactivation for issue6725
+del sys.modules['telnetlib']
+with demandimport.deactivated():
+    import telnetlib
+assert telnetlib.__loader__ == telnetlib.__spec__.loader
+assert telnetlib.__loader__.get_resource_reader
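
A condensed, standalone version of the issue6725 check added above: an
import performed under demandimport.deactivated() bypasses the lazy
importer, so the module is loaded eagerly and its __loader__ matches the
loader recorded on its spec rather than a demandimport wrapper.

    import sys
    from mercurial import demandimport

    demandimport.enable()
    sys.modules.pop('telnetlib', None)
    with demandimport.deactivated():
        import telnetlib
    assert telnetlib.__loader__ == telnetlib.__spec__.loader
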
--- a/tests/test-dirstate-backup.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-dirstate-backup.t	Thu Mar 02 22:45:44 2023 +0100
@@ -2,6 +2,9 @@
 
   $ hg init repo
   $ cd repo
+  $ echo a > a
+  $ hg add a
+  $ hg commit -m a
 
 Try to import an empty patch
 
--- a/tests/test-doctest.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-doctest.py	Thu Mar 02 22:45:44 2023 +0100
@@ -7,8 +7,6 @@
 import subprocess
 import sys
 
-ispy3 = sys.version_info[0] >= 3
-
 if 'TERM' in os.environ:
     del os.environ['TERM']
 
@@ -40,9 +38,7 @@
 
     # minimal copy of doctest.testmod()
     finder = doctest.DocTestFinder()
-    checker = None
-    if ispy3:
-        checker = py3docchecker()
+    checker = py3docchecker()
     runner = doctest.DocTestRunner(checker=checker, optionflags=optionflags)
     for test in finder.find(mod, name):
         runner.run(test)
@@ -91,8 +87,7 @@
         if not re.search(br'\n\s*>>>', fh.read()):
             continue
 
-    if ispy3:
-        f = f.decode()
+    f = f.decode()
 
     modname = f.replace('.py', '').replace('\\', '.').replace('/', '.')
 
--- a/tests/test-empty.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-empty.t	Thu Mar 02 22:45:44 2023 +0100
@@ -9,12 +9,7 @@
   $ hg grep wah
   [1]
   $ hg manifest
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 0 changesets with 0 changes to 0 files
+  $ hg verify -q
 
 Check the basic files created:
 
@@ -37,19 +32,15 @@
   updating to branch default
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd b
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 0 changesets with 0 changes to 0 files
+  $ hg verify -q
   $ ls .hg
   00changelog.i
+  branch
   cache
-  dirstate
   hgrc
   requires
   store
+  undo.backup.branch
   wcache
 
 Should be empty (except for the "basic" requires):
--- a/tests/test-excessive-merge.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-excessive-merge.t	Thu Mar 02 22:45:44 2023 +0100
@@ -93,9 +93,4 @@
        0       0 2ed2a3912a0b 000000000000 000000000000
        1       1 79d7492df40a 2ed2a3912a0b 000000000000
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 4 changes to 2 files
+  $ hg verify -q
--- a/tests/test-extension.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-extension.t	Thu Mar 02 22:45:44 2023 +0100
@@ -574,9 +574,9 @@
 module stub. Our custom lazy importer for Python 2 always returns a stub.
 
   $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}; hg --config extensions.checkrelativity=$TESTTMP/checkrelativity.py checkrelativity) || true
-  *** failed to import extension "checkrelativity" from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist' (py3 !)
-  hg: unknown command 'checkrelativity' (py3 !)
-  (use 'hg help' for a list of commands) (py3 !)
+  *** failed to import extension "checkrelativity" from $TESTTMP/checkrelativity.py: No module named 'extlibroot.lsub1.lsub2.notexist'
+  hg: unknown command 'checkrelativity'
+  (use 'hg help' for a list of commands)
 
 #endif
 
@@ -1863,7 +1863,7 @@
   > test_unicode_default_value = $TESTTMP/test_unicode_default_value.py
   > EOF
   $ hg -R $TESTTMP/opt-unicode-default dummy
-  *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode 'value' found in cmdtable.dummy (py3 !)
+  *** failed to import extension "test_unicode_default_value" from $TESTTMP/test_unicode_default_value.py: unicode 'value' found in cmdtable.dummy
   *** (use b'' to make it byte string)
   hg: unknown command 'dummy'
   (did you mean summary?)
--- a/tests/test-filebranch.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-filebranch.t	Thu Mar 02 22:45:44 2023 +0100
@@ -135,11 +135,6 @@
 
   $ hg status
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 10 changes to 4 files
+  $ hg verify -q
 
   $ cd ..
--- a/tests/test-filecache.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-filecache.py	Thu Mar 02 22:45:44 2023 +0100
@@ -165,7 +165,7 @@
 
 def test_filecache_synced():
     # test old behavior that caused filecached properties to go out of sync
-    os.system('hg init && echo a >> a && hg ci -qAm.')
+    os.system('hg init && echo a >> a && hg add a && hg ci -qm.')
     repo = hg.repository(uimod.ui.load())
     # first rollback clears the filecache, but changelog still stays in __dict__
     repo.rollback()
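
For context, a minimal, self-contained sketch of the stat-based property
caching that test_filecache_synced exercises (an illustration of the idea,
not Mercurial's filecache implementation): a cached value is reused until
the backing file's size or mtime changes.

    import os

    class statcached:
        def __init__(self, path, compute):
            self.path, self.compute = path, compute
            self.stamp = self.value = None

        def get(self):
            st = os.stat(self.path)
            stamp = (st.st_size, st.st_mtime)
            if stamp != self.stamp:  # backing file changed: recompute
                self.stamp, self.value = stamp, self.compute()
            return self.value
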
--- a/tests/test-flagprocessor.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-flagprocessor.t	Thu Mar 02 22:45:44 2023 +0100
@@ -213,11 +213,11 @@
     File "*/mercurial/revlogutils/flagutil.py", line *, in insertflagprocessor (glob) (no-pyoxidizer !)
     File "mercurial.revlogutils.flagutil", line *, in insertflagprocessor (glob) (pyoxidizer !)
       raise error.Abort(msg)
-  mercurial.error.Abort: cannot register multiple processors on flag '0x8'. (py3 !)
+  mercurial.error.Abort: cannot register multiple processors on flag '0x8'.
   *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
   $ hg st 2>&1 | egrep 'cannot register multiple processors|flagprocessorext'
     File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
-  mercurial.error.Abort: cannot register multiple processors on flag '0x8'. (py3 !)
+  mercurial.error.Abort: cannot register multiple processors on flag '0x8'.
   *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
     File "*/tests/flagprocessorext.py", line *, in b64decode (glob)
 
--- a/tests/test-fncache.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-fncache.t	Thu Mar 02 22:45:44 2023 +0100
@@ -49,12 +49,7 @@
 
 Testing verify:
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
   $ rm .hg/store/fncache
 
@@ -66,6 +61,7 @@
    warning: revlog 'data/a.i' not in fncache!
    warning: revlog 'data/a.i.hg/c.i' not in fncache!
    warning: revlog 'data/a.i/b.i' not in fncache!
+  checking dirstate
   checked 3 changesets with 3 changes to 3 files
   3 warnings encountered!
   hint: run "hg debugrebuildfncache" to recover from corrupt fncache
@@ -78,12 +74,7 @@
   adding data/a.i/b.i
   3 items added, 0 removed from fncache
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
   $ cd ..
 
@@ -99,6 +90,7 @@
   .hg
   .hg/00changelog.i
   .hg/00manifest.i
+  .hg/branch
   .hg/cache
   .hg/cache/branch2-served
   .hg/cache/rbc-names-v1
@@ -112,13 +104,9 @@
   .hg/phaseroots
   .hg/requires
   .hg/undo
-  .hg/undo.backup.dirstate
+  .hg/undo.backup.branch
   .hg/undo.backupfiles
-  .hg/undo.bookmarks
-  .hg/undo.branch
   .hg/undo.desc
-  .hg/undo.dirstate
-  .hg/undo.phaseroots
   .hg/wcache
   .hg/wcache/checkisexec (execbit !)
   .hg/wcache/checklink (symlink !)
@@ -137,6 +125,7 @@
   $ find .hg | sort
   .hg
   .hg/00changelog.i
+  .hg/branch
   .hg/cache
   .hg/cache/branch2-served
   .hg/cache/rbc-names-v1
@@ -155,12 +144,8 @@
   .hg/store/requires
   .hg/store/undo
   .hg/store/undo.backupfiles
-  .hg/store/undo.phaseroots
-  .hg/undo.backup.dirstate
-  .hg/undo.bookmarks
-  .hg/undo.branch
+  .hg/undo.backup.branch
   .hg/undo.desc
-  .hg/undo.dirstate
   .hg/wcache
   .hg/wcache/checkisexec (execbit !)
   .hg/wcache/checklink (symlink !)
@@ -313,6 +298,7 @@
 
   $ cat > ../exceptionext.py <<EOF
   > import os
+  > import signal
   > from mercurial import (
   >   commands,
   >   error,
@@ -324,19 +310,14 @@
   > def trwrapper(orig, self, *args, **kwargs):
   >     tr = orig(self, *args, **kwargs)
   >     def fail(tr):
-  >         raise error.Abort(b"forced transaction failure")
+  >         os.kill(os.getpid(), signal.SIGKILL)
  >     # zzz prefix to ensure it sorts after store.write
   >     tr.addfinalize(b'zzz-forcefails', fail)
   >     return tr
   > 
-  > def abortwrapper(orig, self, *args, **kwargs):
-  >     raise error.Abort(b"forced transaction failure")
-  > 
   > def uisetup(ui):
   >     extensions.wrapfunction(localrepo.localrepository, 'transaction',
   >                             trwrapper)
-  >     extensions.wrapfunction(transaction.transaction, '_abort',
-  >                             abortwrapper)
   > 
   > cmdtable = {}
   > 
@@ -348,8 +329,12 @@
 
   $ hg up -q 1
   $ touch z
-  $ hg ci -qAm z 2>/dev/null
-  [255]
+# Cannot rely on the return code value, as chg uses a different one.
+# So we use a `|| echo` trick.
+# XXX-CHG fixing chg behavior would be nice here.
+  $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
+  *Killed* (glob) (?)
+  He's Dead, Jim.
   $ cat .hg/store/fncache | sort
   data/y.i
   data/z.i
@@ -359,6 +344,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 1 changesets with 1 changes to 1 files
   $ cat .hg/store/fncache
   data/y.i
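
For reference, a minimal sketch of why the `|| echo` trick above works: a
SIGKILLed child reports an abnormal exit status, and chg reports a different
code than plain hg, so the test prints a fixed marker instead of asserting a
specific return code.

    import subprocess

    # the shell kills itself, standing in for hg dying mid-transaction
    proc = subprocess.run(['sh', '-c', 'kill -KILL $$'])
    if proc.returncode != 0:  # -9 here; the exact value is not portable
        print("He's Dead, Jim.")
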
--- a/tests/test-git-interop.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-git-interop.t	Thu Mar 02 22:45:44 2023 +0100
@@ -142,6 +142,12 @@
    alpha
   +blah
 
+status --all shows all files, including clean:
+  $ hg status --all
+  M alpha
+  ? gamma
+  C beta
+
 Remove a file, it shows as such:
   $ rm alpha
   $ hg status
@@ -306,7 +312,7 @@
   
   $ hg log -r dead
   abort: unknown revision 'dead'
-  [255]
+  [10]
 
 This covers changelog.findmissing()
   $ hg merge --preview 3d9be8deba43
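
For reference, the one-letter codes in the `hg status --all` listing added
above; --all additionally shows clean files, which plain `hg status` hides.

    STATUS_CODES = {
        'M': 'modified',
        'A': 'added',
        'R': 'removed',
        '!': 'missing (deleted by a non-hg command)',
        '?': 'not tracked',
        'C': 'clean',
    }
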
--- a/tests/test-globalopts.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-globalopts.t	Thu Mar 02 22:45:44 2023 +0100
@@ -272,7 +272,7 @@
 #if no-chg no-rhg
   $ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
   Traceback (most recent call last):
-  Traceback (most recent call last): (py3 !)
+  Traceback (most recent call last):
 #else
 Traceback for '--config' errors not supported with chg.
   $ hg --cwd c --config x --traceback id 2>&1 | grep -i 'traceback'
--- a/tests/test-hardlinks.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-hardlinks.t	Thu Mar 02 22:45:44 2023 +0100
@@ -54,7 +54,6 @@
   1 r1/.hg/store/undo
   1 r1/.hg/store/undo.backup.fncache (repofncache !)
   1 r1/.hg/store/undo.backupfiles
-  1 r1/.hg/store/undo.phaseroots
 
 
 Create hardlinked clone r2:
@@ -96,7 +95,6 @@
   1 r1/.hg/store/undo
   1 r1/.hg/store/undo.backup.fncache (repofncache !)
   1 r1/.hg/store/undo.backupfiles
-  1 r1/.hg/store/undo.phaseroots
 
   $ nlinksdir r2/.hg/store
   2 r2/.hg/store/00changelog.i
@@ -118,7 +116,6 @@
   1 r3/.hg/store/requires
   1 r3/.hg/store/undo
   1 r3/.hg/store/undo.backupfiles
-  1 r3/.hg/store/undo.phaseroots
 
 
 Create a non-inlined filelog in r3:
@@ -147,16 +144,10 @@
   1 r3/.hg/store/undo.backup.fncache (repofncache !)
   1 r3/.hg/store/undo.backup.phaseroots
   1 r3/.hg/store/undo.backupfiles
-  1 r3/.hg/store/undo.phaseroots
 
 Push to repo r1 should break up most hardlinks in r2:
 
-  $ hg -R r2 verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
+  $ hg -R r2 verify -q
 
   $ cd r3
   $ hg push
@@ -182,13 +173,7 @@
   1 r2/.hg/store/fncache
 #endif
 
-  $ hg -R r2 verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
-
+  $ hg -R r2 verify -q
 
   $ cd r1
   $ hg up
@@ -242,7 +227,7 @@
 
   $ nlinksdir r4
   2 r4/.hg/00changelog.i
-  2 r4/.hg/branch
+  [24] r4/.hg/branch (re)
   2 r4/.hg/cache/branch2-base
   2 r4/.hg/cache/branch2-immutable
   2 r4/.hg/cache/branch2-served
@@ -271,12 +256,9 @@
   2 r4/.hg/store/undo.backup.fncache (repofncache !)
   2 r4/.hg/store/undo.backup.phaseroots
   2 r4/.hg/store/undo.backupfiles
-  2 r4/.hg/store/undo.phaseroots
-  [24] r4/\.hg/undo\.backup\.dirstate (re)
-  2 r4/.hg/undo.bookmarks
-  2 r4/.hg/undo.branch
+  [24] r4/.hg/undo.backup.branch (re)
+  2 r4/\.hg/undo\.backup\.dirstate (re)
   2 r4/.hg/undo.desc
-  [24] r4/\.hg/undo\.dirstate (re)
   2 r4/.hg/wcache/checkisexec (execbit !)
   2 r4/.hg/wcache/checklink-target (symlink !)
   2 r4/.hg/wcache/checknoexec (execbit !)
@@ -288,9 +270,9 @@
 
 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
 #if hardlink-whitelisted
-  $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
-  4 r4/.hg/undo.backup.dirstate
-  4 r4/.hg/undo.dirstate
+  $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
+  2 r4/.hg/dirstate
+  2 r4/.hg/undo.backup.dirstate
 #endif
 
 
@@ -329,12 +311,9 @@
   2 r4/.hg/store/undo.backup.fncache (repofncache !)
   2 r4/.hg/store/undo.backup.phaseroots
   2 r4/.hg/store/undo.backupfiles
-  2 r4/.hg/store/undo.phaseroots
-  [24] r4/\.hg/undo\.backup\.dirstate (re)
-  2 r4/.hg/undo.bookmarks
-  2 r4/.hg/undo.branch
+  [23] r4/.hg/undo.backup.branch (re)
+  2 r4/\.hg/undo\.backup\.dirstate (re)
   2 r4/.hg/undo.desc
-  [24] r4/\.hg/undo\.dirstate (re)
   2 r4/.hg/wcache/checkisexec (execbit !)
   2 r4/.hg/wcache/checklink-target (symlink !)
   2 r4/.hg/wcache/checknoexec (execbit !)
@@ -346,9 +325,9 @@
   2 r4/f3 (no-execbit !)
 
 #if hardlink-whitelisted
-  $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
-  4 r4/.hg/undo.backup.dirstate
-  4 r4/.hg/undo.dirstate
+  $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/dirstate
+  1 r4/.hg/dirstate
+  2 r4/.hg/undo.backup.dirstate
 #endif
 
 Test hardlinking outside hg:
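
For reference, a minimal sketch of what the nlinksdir helper used throughout
this test reports: the st_nlink count of each file, i.e. how many directory
entries still share that file's inode (an illustration of the idea, not the
helper's exact code).

    import os

    def nlinksdir(path):
        for root, _dirs, files in sorted(os.walk(path)):
            for name in sorted(files):
                full = os.path.join(root, name)
                print(os.stat(full).st_nlink, full)
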
--- a/tests/test-help.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-help.t	Thu Mar 02 22:45:44 2023 +0100
@@ -985,6 +985,8 @@
                  details.
    debug-revlog-index
                  dump index data for a revlog
+   debug-revlog-stats
+                 display statistics about revlogs in the store
    debugancestor
                  find the ancestor revision of two revisions in a given index
    debugantivirusrunning
@@ -2170,8 +2172,11 @@
   
       ":union"
         Uses the internal non-interactive simple merge algorithm for merging
-        files. It will use both left and right sides for conflict regions. No
-        markers are inserted.
+        files. It will use both local and other sides for conflict regions by
+        adding local on top of other. No markers are inserted.
+  
+      ":union-other-first"
+        Like :union, but adds other on top of local.
   
       Internal tools are always available and do not require a GUI but will by
       default not handle symlinks or binary files. See next section for detail
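
A minimal sketch of the behavior the updated help text describes: for each
conflict region, :union keeps the local side followed by the other side,
:union-other-first does the reverse, and neither inserts conflict markers.

    def union_region(local_lines, other_lines, other_first=False):
        first, second = ((other_lines, local_lines) if other_first
                         else (local_lines, other_lines))
        return first + second

    print(union_region(['local\n'], ['other\n']))        # :union
    print(union_region(['local\n'], ['other\n'], True))  # :union-other-first
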
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-hgweb-head.t	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,102 @@
+#require serve
+
+Some tests for hgweb responding to HEAD requests
+
+  $ hg init test
+  $ cd test
+  $ mkdir da
+  $ echo foo > da/foo
+  $ echo foo > foo
+  $ hg ci -Ambase
+  adding da/foo
+  adding foo
+  $ hg bookmark -r0 '@'
+  $ hg bookmark -r0 'a b c'
+  $ hg bookmark -r0 'd/e/f'
+  $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
+  $ cat hg.pid >> $DAEMON_PIDS
+
+manifest
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/?style=raw' - date etag server
+  200 Script output follows
+  content-type: text/plain; charset=ascii
+  
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/da?style=raw' - date etag server
+  200 Script output follows
+  content-type: text/plain; charset=ascii
+  
+
+plain file
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/foo?style=raw' - date etag server
+  200 Script output follows
+  content-disposition: inline; filename="foo"
+  content-length: 4
+  content-type: application/binary
+  
+
+should give a 404 - static file that does not exist
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'static/bogus' - date etag server
+  404 Not Found
+  content-type: text/html; charset=ascii
+  
+  [1]
+
+should give a 404 - bad revision
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/spam/foo?style=raw' - date etag server
+  404 Not Found
+  content-type: text/plain; charset=ascii
+  
+  [1]
+
+should give a 400 - bad command
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/foo?cmd=spam&style=raw' - date etag server
+  400* (glob)
+  content-type: text/plain; charset=ascii
+  
+  [1]
+
+should give a 404 - file does not exist
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/bork?style=raw' - date etag server
+  404 Not Found
+  content-type: text/plain; charset=ascii
+  
+  [1]
+
+try bad style
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'file/tip/?style=foobar' - date etag server
+  200 Script output follows
+  content-type: text/html; charset=ascii
+  
+
+log
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'log?style=raw' - date etag server
+  200 Script output follows
+  content-type: text/plain; charset=ascii
+  
+
+access bookmarks
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'rev/@?style=paper' - date etag server
+  200 Script output follows
+  content-type: text/html; charset=ascii
+  
+
+static file
+
+  $ get-with-headers.py localhost:$HGPORT --method=HEAD 'static/style-gitweb.css' - date etag server
+  200 Script output follows
+  content-length: 9074
+  content-type: text/css
+  
+
+  $ killdaemons.py
+
+  $ cd ..
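
A minimal, standard-library sketch of the kind of request get-with-headers.py
issues above (the host, port and header choice here are assumptions for
illustration): a HEAD request returns only the status line and headers, with
no body.

    import http.client

    conn = http.client.HTTPConnection('localhost', 8000)
    conn.request('HEAD', '/file/tip/?style=raw')
    resp = conn.getresponse()
    print(resp.status, resp.reason)
    print('content-type:', resp.getheader('content-type'))
    conn.close()
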
--- a/tests/test-hook.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-hook.t	Thu Mar 02 22:45:44 2023 +0100
@@ -462,13 +462,11 @@
   00manifest.i
   data
   fncache (repofncache !)
-  journal.phaseroots
   phaseroots
   requires
   undo
   undo.backup.fncache (repofncache !)
   undo.backupfiles
-  undo.phaseroots
 
 
 precommit hook can prevent commit
@@ -644,6 +642,15 @@
   HG_TXNNAME=push
   HG_URL=file:$TESTTMP/a
   
+  txnabort Python hook: bundle2,changes,source,txnid,txnname,url
+  txnabort hook: HG_BUNDLE2=1
+  HG_HOOKNAME=txnabort.1
+  HG_HOOKTYPE=txnabort
+  HG_SOURCE=push
+  HG_TXNID=TXN:$ID$
+  HG_TXNNAME=push
+  HG_URL=file:$TESTTMP/a
+  
   abort: prepushkey hook exited with status 1
   [40]
   $ cd ../a
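
For reference, a minimal sketch that dumps the HG_* environment variables an
external hook would receive, using the keys shown in the txnabort output
above (the function is illustrative, not part of the test):

    import os

    def report_txnabort():
        for key in ('HG_HOOKNAME', 'HG_HOOKTYPE', 'HG_SOURCE',
                    'HG_TXNID', 'HG_TXNNAME', 'HG_URL'):
            print(key, '=', os.environ.get(key, ''))
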
@@ -975,19 +982,19 @@
   Traceback (most recent call last):
   SyntaxError: * (glob)
   exception from second failed import attempt:
-  Traceback (most recent call last): (py3 !)
-  SyntaxError: * (glob) (py3 !)
   Traceback (most recent call last):
-  ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
+  SyntaxError: * (glob)
+  Traceback (most recent call last):
+  ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
   Traceback (most recent call last):
-  SyntaxError: * (glob) (py3 !)
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'hgext_syntaxerror' (py3 no-py36 !)
+  SyntaxError: * (glob)
+  Traceback (most recent call last):
+  ImportError: No module named 'hgext_syntaxerror' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext_syntaxerror' (py36 !)
-  Traceback (most recent call last): (py3 !)
+  Traceback (most recent call last):
       raise error.HookLoadError( (py38 !)
-  mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed (py3 !)
+  mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
   abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
 
   $ echo '[hooks]' > ../a/.hg/hgrc
@@ -1120,7 +1127,7 @@
 
   $ hg id
   loading pre-identify.npmd hook failed:
-  abort: No module named 'repo' (py3 !)
+  abort: No module named 'repo'
   [255]
 
   $ cd ../../b
@@ -1140,24 +1147,24 @@
   $ hg --traceback commit -ma 2>&1 | egrep '^exception|ImportError|ModuleNotFoundError|Traceback|HookLoadError|abort'
   exception from first failed import attempt:
   Traceback (most recent call last):
-  ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
+  ImportError: No module named 'somebogusmodule' (no-py36 !)
   ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
   exception from second failed import attempt:
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
-  ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
-  ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
-  Traceback (most recent call last): (py3 !)
-  ImportError: No module named 'somebogusmodule' (py3 no-py36 !)
+  Traceback (most recent call last):
+  ImportError: No module named 'somebogusmodule' (no-py36 !)
   ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
   Traceback (most recent call last):
-  ImportError: No module named 'hgext_importfail' (py3 no-py36 !)
+  ImportError: No module named 'hgext_importfail' (no-py36 !)
+  ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
+  Traceback (most recent call last):
+  ImportError: No module named 'somebogusmodule' (no-py36 !)
+  ModuleNotFoundError: No module named 'somebogusmodule' (py36 !)
+  Traceback (most recent call last):
+  ImportError: No module named 'hgext_importfail' (no-py36 !)
   ModuleNotFoundError: No module named 'hgext_importfail' (py36 !)
   Traceback (most recent call last):
       raise error.HookLoadError( (py38 !)
-  mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed (py3 !)
+  mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed
   abort: precommit.importfail hook is invalid: import of "importfail" failed
 
 Issue1827: Hooks Update & Commit not completely post operation
--- a/tests/test-http-bad-server.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-http-bad-server.t	Thu Mar 02 22:45:44 2023 +0100
@@ -132,8 +132,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
   readline(*) -> (1?) Accept-Encoding* (glob)
   read limit reached; closing socket
@@ -174,8 +174,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob)
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -193,8 +193,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
   sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
-  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
-  write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
+  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
+  write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py36 !)
   readline(24 from ~) -> (*) GET /?cmd=getbundle HTTP* (glob)
   read limit reached; closing socket
   readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
@@ -230,8 +230,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx httppostargs known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (27) POST /?cmd=batch HTTP/1.1\r\n (glob)
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (41) content-type: application/mercurial-0.1\r\n (glob)
@@ -256,7 +256,7 @@
   Traceback (most recent call last):
   Exception: connection closed after receiving N bytes
   
-  write(126) -> HTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+  write(126) -> HTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
 
   $ rm -f error.log
 
@@ -283,13 +283,13 @@
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
   sendall(1 from 160) -> (0) H (py36 !)
-  write(1 from 160) -> (0) H (py3 no-py36 !)
+  write(1 from 160) -> (0) H (no-py36 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
   Traceback (most recent call last):
   Exception: connection closed after sending N bytes
   
-  write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (glob) (py3 no-py36 !)
+  write(286) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (glob) (no-py36 !)
 
   $ rm -f error.log
 
@@ -317,8 +317,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(20 from *) -> (0) batch branchmap bund (glob) (py36 !)
-  write(160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(20 from *) -> (0) batch branchmap bund (glob) (py3 no-py36 !)
+  write(160) -> (20) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(20 from *) -> (0) batch branchmap bund (glob) (no-py36 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=capabilities': (glob)
   Traceback (most recent call last):
@@ -356,8 +356,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> (568) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -368,13 +368,13 @@
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
   sendall(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (py36 !)
-  write(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (py3 no-py36 !)
+  write(118 from 159) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: applicat (no-py36 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
   Traceback (most recent call last):
   Exception: connection closed after sending N bytes
   
-  write(285) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+  write(285) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
 
   $ rm -f error.log
 
@@ -402,8 +402,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -415,8 +415,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
   sendall(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (py36 !)
-  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
-  write(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (py3 no-py36 !)
+  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
+  write(24 from 42) -> (0) 96ee1d7354c4ad7372047672 (no-py36 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=batch': (glob)
   Traceback (most recent call last):
@@ -455,8 +455,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -468,8 +468,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
   sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
-  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
-  write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
+  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
+  write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py36 !)
   readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -480,13 +480,13 @@
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
   sendall(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (py36 !)
-  write(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (py3 no-py36 !)
+  write(129 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercuri (no-py36 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
   Traceback (most recent call last):
   Exception: connection closed after sending N bytes
   
-  write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+  write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
 
   $ rm -f error.log
 
@@ -522,7 +522,7 @@
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
   Traceback (most recent call last):
   Exception: connection closed after sending N bytes
-  write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n
   
 #endif
 
@@ -553,8 +553,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -566,8 +566,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
   sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
-  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
-  write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py3 no-py36 !)
+  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
+  write(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (no-py36 !)
   readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -578,13 +578,13 @@
   readline(*) -> (49) user-agent: mercurial/proto-1.0 (Mercurial 4.2)\r\n (glob)
   readline(*) -> (2) \r\n (glob)
   sendall(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py36 !)
-  write(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+  write(167 from 167) -> (0) HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
   Traceback (most recent call last):
   Exception: connection closed after sending N bytes
   
-  write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+  write(293) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\nHTTP/1.1 500 Internal Server Error\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
 
   $ rm -f error.log
 
@@ -613,8 +613,8 @@
   readline(*) -> (2) \r\n (glob)
   sendall(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py36 !)
   sendall(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py36 !)
-  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (py3 no-py36 !)
-  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (py3 no-py36 !)
+  write(160) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: *\r\n\r\n (glob) (no-py36 !)
+  write(*) -> batch branchmap $USUAL_BUNDLE2_CAPS_NO_PHASES$ changegroupsubset compression=none getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=* unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash (glob) (no-py36 !)
   readline(~) -> (26) GET /?cmd=batch HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -626,7 +626,7 @@
   readline(*) -> (2) \r\n (glob)
   sendall(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py36 !)
   sendall(42) -> 96ee1d7354c4ad7372047672c36a1f561e3a6a4c\n; (py36 !)
-  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (py3 no-py36 !)
+  write(159) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.1\r\nContent-Length: 42\r\n\r\n (no-py36 !)
   readline(~) -> (30) GET /?cmd=getbundle HTTP/1.1\r\n
   readline(*) -> (27) Accept-Encoding: identity\r\n (glob)
   readline(*) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob)
@@ -640,7 +640,7 @@
   sendall(6) -> 1\\r\\n\x04\\r\\n (esc) (py36 !)
   sendall(9) -> 4\r\nnone\r\n (py36 !)
   sendall(9 from 9) -> (0) 4\r\nHG20\r\n (py36 !)
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 no-py36 !)
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (no-py36 !)
   write limit reached; closing socket
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/?cmd=getbundle': (glob)
   Traceback (most recent call last):
@@ -679,8 +679,8 @@
 
 #else
   $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -11
-  readline(~) -> (2) \r\n (py3 !)
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  readline(~) -> (2) \r\n
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
   write(6) -> 1\\r\\n\x04\\r\\n (esc)
   write(9) -> 4\r\nnone\r\n
   write(6 from 9) -> (0) 4\r\nHG2
@@ -724,8 +724,8 @@
 
 #else
   $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
-  readline(~) -> (2) \r\n (py3 !)
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  readline(~) -> (2) \r\n
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
   write(41) -> Content-Type: application/mercurial-0.2\r\n
   write(6) -> 1\\r\\n\x04\\r\\n (esc)
   write(9) -> 4\r\nnone\r\n
@@ -771,8 +771,8 @@
 
 #else
   $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -12
-  readline(~) -> (2) \r\n (py3 !)
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  readline(~) -> (2) \r\n
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
   write(41) -> Content-Type: application/mercurial-0.2\r\n
   write(6) -> 1\\r\\n\x04\\r\\n (esc)
   write(9) -> 4\r\nnone\r\n
@@ -820,8 +820,8 @@
 #else
 
   $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -13
-  readline(~) -> (2) \r\n (py3 !)
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  readline(~) -> (2) \r\n
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
   write(41) -> Content-Type: application/mercurial-0.2\r\n
   write(6) -> 1\\r\\n\x04\\r\\n (esc)
   write(9) -> 4\r\nnone\r\n
@@ -873,8 +873,8 @@
 
 #else
   $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -14
-  readline(~) -> (2) \r\n (py3 !)
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  readline(~) -> (2) \r\n
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
   write(41) -> Content-Type: application/mercurial-0.2\r\n
   write(6) -> 1\\r\\n\x04\\r\\n (esc)
   write(9) -> 4\r\nnone\r\n
@@ -929,7 +929,7 @@
 
 #else
   $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -15
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
   write(28) -> Transfer-Encoding: chunked\r\n
   write(6) -> 1\\r\\n\x04\\r\\n (esc)
   write(9) -> 4\r\nnone\r\n
@@ -986,8 +986,8 @@
 
 #else
   $ "$PYTHON" $TESTDIR/filtertraceback.py < error.log | tail -16
-  readline(~) -> (2) \r\n (py3 !)
-  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n (py3 !)
+  readline(~) -> (2) \r\n
+  write(167) -> HTTP/1.1 200 Script output follows\r\nServer: badhttpserver\r\nDate: $HTTP_DATE$\r\nContent-Type: application/mercurial-0.2\r\nTransfer-Encoding: chunked\r\n\r\n
   write(41) -> Content-Type: application/mercurial-0.2\r\n
   write(6) -> 1\\r\\n\x04\\r\\n (esc)
   write(9) -> 4\r\nnone\r\n
--- a/tests/test-http-bundle1.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-http-bundle1.t	Thu Mar 02 22:45:44 2023 +0100
@@ -45,12 +45,7 @@
   no changes found
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R copy
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 4 changes to 4 files
+  $ hg verify -R copy -q
 #endif
 
 try to clone via stream, should use pull instead
@@ -99,12 +94,7 @@
   new changesets 8b6053c928fe
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R copy-pull
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 4 changes to 4 files
+  $ hg verify -R copy-pull -q
   $ cd test
   $ echo bar > bar
   $ hg commit -A -d '1 0' -m 2
--- a/tests/test-http-clone-r.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-http-clone-r.t	Thu Mar 02 22:45:44 2023 +0100
@@ -25,7 +25,7 @@
   $ for i in 0 1 2 3 4 5 6 7 8; do
   >    hg clone -r "$i" http://localhost:$HGPORT/ test-"$i"
   >    if cd test-"$i"; then
-  >       hg verify
+  >       hg verify -q
   >       cd ..
   >    fi
   > done
@@ -36,11 +36,6 @@
   new changesets bfaf4b5cbf01
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -48,11 +43,6 @@
   new changesets bfaf4b5cbf01:21f32785131f
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -60,11 +50,6 @@
   new changesets bfaf4b5cbf01:4ce51a113780
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -72,11 +57,6 @@
   new changesets bfaf4b5cbf01:93ee6ab32777
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 4 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -84,11 +64,6 @@
   new changesets bfaf4b5cbf01:c70afb1ee985
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -96,11 +71,6 @@
   new changesets bfaf4b5cbf01:f03ae5a9b979
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -108,11 +78,6 @@
   new changesets bfaf4b5cbf01:095cb14b1b4d
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 5 changes to 2 files
   adding changesets
   adding manifests
   adding file changes
@@ -120,11 +85,6 @@
   new changesets bfaf4b5cbf01:faa2e4234c7a
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 6 changes to 3 files
   adding changesets
   adding manifests
   adding file changes
@@ -132,11 +92,6 @@
   new changesets bfaf4b5cbf01:916f1afdef90
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
   $ cd test-8
   $ hg pull ../test-7
   pulling from ../test-7
@@ -147,12 +102,7 @@
   added 4 changesets with 2 changes to 3 files (+1 heads)
   new changesets c70afb1ee985:faa2e4234c7a
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
   $ cd ..
   $ cd test-1
   $ hg pull -r 4 http://localhost:$HGPORT/
@@ -164,12 +114,7 @@
   added 1 changesets with 0 changes to 0 files (+1 heads)
   new changesets c70afb1ee985
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 2 changes to 1 files
+  $ hg verify -q
   $ hg pull http://localhost:$HGPORT/
   pulling from http://localhost:$HGPORT/
   searching for changes
@@ -190,12 +135,7 @@
   added 2 changesets with 0 changes to 0 files (+1 heads)
   new changesets c70afb1ee985:f03ae5a9b979
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 3 changes to 1 files
+  $ hg verify -q
   $ hg pull http://localhost:$HGPORT/
   pulling from http://localhost:$HGPORT/
   searching for changes
@@ -205,12 +145,7 @@
   added 4 changesets with 4 changes to 4 files
   new changesets 93ee6ab32777:916f1afdef90
   (run 'hg update' to get a working copy)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
   $ cd ..
 
 no default destination if url has no path:
--- a/tests/test-http-proxy.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-http-proxy.t	Thu Mar 02 22:45:44 2023 +0100
@@ -22,12 +22,7 @@
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd b
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
   $ cd ..
 
 url for proxy, pull
@@ -42,12 +37,7 @@
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd b-pull
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
   $ cd ..
 
 host:port for proxy
--- a/tests/test-http.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-http.t	Thu Mar 02 22:45:44 2023 +0100
@@ -34,12 +34,7 @@
   transferred * bytes in * seconds (*/sec) (glob)
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R copy
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 4 changes to 4 files
+  $ hg verify -R copy -q
 #endif
 
 try to clone via stream, should use pull instead
@@ -88,12 +83,7 @@
   new changesets 8b6053c928fe
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R copy-pull
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 4 changes to 4 files
+  $ hg verify -R copy-pull -q
   $ cd test
   $ echo bar > bar
   $ hg commit -A -d '1 0' -m 2
--- a/tests/test-https.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-https.t	Thu Mar 02 22:45:44 2023 +0100
@@ -137,12 +137,7 @@
   new changesets 8b6053c928fe
   updating to branch default
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg verify -R copy-pull
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 4 changes to 4 files
+  $ hg verify -R copy-pull -q
   $ cd test
   $ echo bar > bar
   $ hg commit -A -d '1 0' -m 2
--- a/tests/test-import-merge.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-import-merge.t	Thu Mar 02 22:45:44 2023 +0100
@@ -159,9 +159,4 @@
   rollback completed
   abort: patch is damaged or loses information
   [255]
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
+  $ hg verify -q
--- a/tests/test-incoming-outgoing.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-incoming-outgoing.t	Thu Mar 02 22:45:44 2023 +0100
@@ -7,12 +7,7 @@
   >     hg commit -A -m $i
   > done
   adding foo
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 9 changes to 1 files
+  $ hg verify -q
   $ hg serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
   $ cd ..
@@ -365,12 +360,7 @@
   >     echo $i >> foo
   >     hg commit -A -m $i
   > done
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 14 changesets with 14 changes to 1 files
+  $ hg verify -q
   $ cd ..
   $ hg -R test-dev outgoing test
   comparing with test
--- a/tests/test-infinitepush.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-infinitepush.t	Thu Mar 02 22:45:44 2023 +0100
@@ -46,8 +46,8 @@
   remote:     bc22f9a30a82  multihead1
   remote:     ee4802bf6864  multihead2
   $ scratchnodes
-  bc22f9a30a821118244deacbd732e394ed0b686c ab1bc557aa090a9e4145512c734b6e8a828393a5
-  ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f ab1bc557aa090a9e4145512c734b6e8a828393a5
+  bc22f9a30a821118244deacbd732e394ed0b686c de1b7d132ba98f0172cd974e3e69dfa80faa335c
+  ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f de1b7d132ba98f0172cd974e3e69dfa80faa335c
 
 Create two new scratch bookmarks
   $ hg up 0
--- a/tests/test-inherit-mode.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-inherit-mode.t	Thu Mar 02 22:45:44 2023 +0100
@@ -68,6 +68,7 @@
   $ "$PYTHON" ../printmodes.py .
   00700 ./.hg/
   00600 ./.hg/00changelog.i
+  00660 ./.hg/branch
   00770 ./.hg/cache/
   00660 ./.hg/cache/branch2-served
   00660 ./.hg/cache/rbc-names-v1
@@ -94,12 +95,8 @@
   00600 ./.hg/store/requires
   00660 ./.hg/store/undo
   00660 ./.hg/store/undo.backupfiles
-  00660 ./.hg/store/undo.phaseroots
-  00660 ./.hg/undo.backup.dirstate
-  00660 ./.hg/undo.bookmarks
-  00660 ./.hg/undo.branch
+  00660 ./.hg/undo.backup.branch
   00660 ./.hg/undo.desc
-  00660 ./.hg/undo.dirstate
   00770 ./.hg/wcache/
   00711 ./.hg/wcache/checkisexec
   007.. ./.hg/wcache/checklink (re)
@@ -133,11 +130,11 @@
   $ "$PYTHON" ../printmodes.py ../push
   00770 ../push/.hg/
   00660 ../push/.hg/00changelog.i
+  00660 ../push/.hg/branch
   00770 ../push/.hg/cache/
   00660 ../push/.hg/cache/branch2-base
   00660 ../push/.hg/cache/rbc-names-v1
   00660 ../push/.hg/cache/rbc-revs-v1
-  00660 ../push/.hg/dirstate
   00660 ../push/.hg/requires
   00770 ../push/.hg/store/
   00660 ../push/.hg/store/00changelog.i
@@ -156,11 +153,8 @@
   00660 ../push/.hg/store/requires
   00660 ../push/.hg/store/undo
   00660 ../push/.hg/store/undo.backupfiles
-  00660 ../push/.hg/store/undo.phaseroots
-  00660 ../push/.hg/undo.bookmarks
-  00660 ../push/.hg/undo.branch
+  00660 ../push/.hg/undo.backup.branch
   00660 ../push/.hg/undo.desc
-  00660 ../push/.hg/undo.dirstate
   00770 ../push/.hg/wcache/
 
 
--- a/tests/test-install.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-install.t	Thu Mar 02 22:45:44 2023 +0100
@@ -3,7 +3,7 @@
   checking encoding (ascii)...
   checking Python executable (*) (glob)
   checking Python implementation (*) (glob)
-  checking Python version (3.*) (glob) (py3 !)
+  checking Python version (3.*) (glob)
   checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
   checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
   checking Python security support (*) (glob)
@@ -68,7 +68,7 @@
   checking encoding (ascii)...
   checking Python executable (*) (glob)
   checking Python implementation (*) (glob)
-  checking Python version (3.*) (glob) (py3 !)
+  checking Python version (3.*) (glob)
   checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
   checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
   checking Python security support (*) (glob)
@@ -118,7 +118,7 @@
   checking encoding (ascii)...
   checking Python executable (*) (glob)
   checking Python implementation (*) (glob)
-  checking Python version (3.*) (glob) (py3 !)
+  checking Python version (3.*) (glob)
   checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
   checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
   checking Python security support (*) (glob)
@@ -148,7 +148,7 @@
   checking encoding (ascii)...
   checking Python executable (*) (glob)
   checking Python implementation (*) (glob)
-  checking Python version (3.*) (glob) (py3 !)
+  checking Python version (3.*) (glob)
   checking Python lib (.*[Ll]ib.*)... (re) (no-pyoxidizer !)
   checking Python lib (.*pyoxidizer.*)... (re) (pyoxidizer !)
   checking Python security support (*) (glob)
@@ -238,42 +238,3 @@
   checking username (test)
   no problems detected
 #endif
-
-#if virtualenv no-py3 network-io no-pyoxidizer
-
-Note: --no-site-packages is the default for all versions enabled by hghave
-
-  $ "$PYTHON" -m virtualenv installenv >> pip.log
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
-
-Note: we use this weird path to run pip and hg to avoid platform differences,
-since it's bin on most platforms but Scripts on Windows.
-  $ ./installenv/*/pip install $TESTDIR/.. >> pip.log
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
-  DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality. (?)
-  $ ./installenv/*/hg debuginstall || cat pip.log
-  checking encoding (ascii)...
-  checking Python executable (*) (glob)
-  checking Python implementation (*) (glob)
-  checking Python version (2.*) (glob)
-  checking Python lib (*)... (glob)
-  checking Python security support (*) (glob)
-    TLS 1.2 not supported by Python install; network connections lack modern security (?)
-    SNI not supported by Python install; may have connectivity issues with some servers (?)
-  checking Rust extensions \((installed|missing)\) (re)
-  checking Mercurial version (*) (glob)
-  checking Mercurial custom build (*) (glob)
-  checking module policy (*) (glob)
-  checking installed modules (*/mercurial)... (glob)
-  checking registered compression engines (*) (glob)
-  checking available compression engines (*) (glob)
-  checking available compression engines for wire protocol (*) (glob)
-  checking "re2" regexp engine \((available|missing)\) (re)
-  checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
-  checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
-  checking commit editor... (*) (glob)
-  checking username (test)
-  no problems detected
-#endif
--- a/tests/test-issue1175.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-issue1175.t	Thu Mar 02 22:45:44 2023 +0100
@@ -37,12 +37,7 @@
   updating the branch cache
   committed changeset 5:83a687e8a97c80992ba385bbfd766be181bfb1d1
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 6 changesets with 4 changes to 4 files
+  $ hg verify -q
 
   $ hg export --git tip
   # HG changeset patch
--- a/tests/test-journal-exists.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-journal-exists.t	Thu Mar 02 22:45:44 2023 +0100
@@ -25,13 +25,7 @@
   abort: abandoned transaction found
   (run 'hg recover' to clean up transaction)
   [255]
-  $ hg recover --verify
-  rolling back interrupted transaction
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg recover --verify -q
 
 recover, no verify
 
--- a/tests/test-keyword.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-keyword.t	Thu Mar 02 22:45:44 2023 +0100
@@ -492,7 +492,8 @@
   $ echo '$Id$' > y
   $ echo '$Id$' > z
   $ hg add y
-  $ hg commit -Am "rollback only" z
+  $ hg add z
+  $ hg commit -m "rollback only" z
   $ cat z
   $Id: z,v 45a5d3adce53 1970/01/01 00:00:00 test $
   $ hg --verbose rollback
@@ -838,12 +839,7 @@
 
   $ hg status
   ? c
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 4 changes to 3 files
+  $ hg verify -q
   $ cat a b
   expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
   do not process $Id:
--- a/tests/test-largefiles-small-disk.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-largefiles-small-disk.t	Thu Mar 02 22:45:44 2023 +0100
@@ -10,7 +10,7 @@
   > _origcopyfileobj = shutil.copyfileobj
   > def copyfileobj(fsrc, fdst, length=16 * 1024):
   >     # allow journal files (used by transaction) to be written
-  >     if b'journal.' in fdst.name:
+  >     if b'journal.' in fdst.name or b'backup.' in fdst.name:
   >         return _origcopyfileobj(fsrc, fdst, length)
   >     fdst.write(fsrc.read(4))
   >     raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
--- a/tests/test-largefiles-update.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-largefiles-update.t	Thu Mar 02 22:45:44 2023 +0100
@@ -771,14 +771,26 @@
   $ hg log -qr 'file("set:exec()")'
   9:be1b433a65b1
 
-Test a fatal error interrupting an update. Verify that status report dirty
-files correctly after an interrupted update. Also verify that checking all
-hashes reveals it isn't clean.
+Test a fatal error interrupting an update
+-----------------------------------------
+
+In a previous version, this test was meant to:
+| verify that status report dirty files correctly after an interrupted
+| update. Also verify that checking all hashes reveals it isn't clean.
+
+In the meantime, improvements to the update logic mean it is much harder to get the dirstate file written too early. So the original intent seems "fine".
+
+However, it shows another error where the standin file for large1 seems to be
+silently updated, confusing the general logic. This seems to have been broken
+before our updates and the test is marked as such.
 
 Start with clean dirstates:
   $ hg up --quiet --clean --rev "8^"
   $ sleep 1
+  $ cat large1
+  large1 in #3
   $ hg st
+
 Update standins without updating largefiles - large1 is modified and largeX is
 added:
   $ cat << EOF > ../crashupdatelfiles.py
@@ -790,18 +802,25 @@
   $ hg up -Cr "8" --config extensions.crashupdatelfiles=../crashupdatelfiles.py
   [254]
 Check large1 content and status ... and that update will undo modifications:
+  $ hg id
+  d65e59e952a9+ (known-bad-output !)
+  d65e59e952a9 (missing-correct-output !)
   $ cat large1
   large1 in #3
   $ hg st
-  M large1
-  ! largeX
-  $ hg up -Cr .
+  $ hg up -Cr 8
   getting changed largefiles
-  2 largefiles updated, 0 removed
+  1 largefiles updated, 0 removed (known-bad-output !)
+  2 largefiles updated, 0 removed (missing-correct-output !)
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cat large1
-  manually modified before 'hg transplant --continue'
+  large1 in #3 (known-bad-output !)
+  manually modified before 'hg transplant --continue' (missing-correct-output !)
   $ hg st
+  M large1 (known-bad-output !)
+
+  $ hg revert --all --no-backup
+  reverting .hglf/large1 (known-bad-output !)
 Force largefiles rehashing and check that all changes have been caught by
 status and update:
   $ rm .hg/largefiles/dirstate
--- a/tests/test-largefiles-wireproto.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-largefiles-wireproto.t	Thu Mar 02 22:45:44 2023 +0100
@@ -151,14 +151,7 @@
   $ hg commit -m "m2"
   Invoking status precommit hook
   A f2
-  $ hg verify --large
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
-  searching 1 changesets for largefiles
-  verified existence of 1 revisions of 1 largefiles
+  $ hg verify --large -q
   $ hg serve --config extensions.largefiles=! -R ../r6 -d -p $HGPORT --pid-file ../hg.pid
   $ cat ../hg.pid >> $DAEMON_PIDS
   $ hg push http://localhost:$HGPORT
@@ -249,6 +242,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 1 changesets with 1 changes to 1 files
   searching 1 changesets for largefiles
   changeset 0:cf03e5bb9936: f1 missing
@@ -280,14 +274,7 @@
   $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
   $ [ ! -f http-clone/f1 ]
   $ [ ! -f http-clone-usercache ]
-  $ hg -R http-clone verify --large --lfc
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
-  searching 1 changesets for largefiles
-  verified contents of 1 revisions of 1 largefiles
+  $ hg -R http-clone verify --large --lfc -q
   $ hg -R http-clone up -Cqr null
 
 largefiles pulled on update - no server side problems:
@@ -343,14 +330,7 @@
   adding file changes
   added 2 changesets with 2 changes to 2 files
   new changesets 567253b0f523:04d19c27a332 (2 drafts)
-  $ hg -R batchverifyclone verify --large --lfa
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
-  searching 2 changesets for largefiles
-  verified existence of 2 revisions of 2 largefiles
+  $ hg -R batchverifyclone verify --large --lfa -q
   $ tail -1 access.log
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=statlfile+sha%3D972a1a11f19934401291cc99117ec614933374ce%3Bstatlfile+sha%3Dc801c9cfe94400963fcb683246217d5db77f9a9a x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
   $ hg -R batchverifyclone update
@@ -381,14 +361,7 @@
   added 1 changesets with 1 changes to 1 files
   new changesets 6bba8cb6935d (1 drafts)
   (run 'hg update' to get a working copy)
-  $ hg -R batchverifyclone verify --lfa
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
-  searching 3 changesets for largefiles
-  verified existence of 3 revisions of 3 largefiles
+  $ hg -R batchverifyclone verify --lfa -q
   $ tail -1 access.log
   $LOCALIP - - [$LOGDATE$] "GET /?cmd=statlfile HTTP/1.1" 200 - x-hgarg-1:sha=c8559c3c9cfb42131794b7d8009230403b9b454c x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
 
--- a/tests/test-largefiles.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-largefiles.t	Thu Mar 02 22:45:44 2023 +0100
@@ -1029,14 +1029,7 @@
   2 largefiles updated, 0 removed
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
   8 additional largefiles cached
-  $ hg -R a-clone1 verify --large --lfa --lfc
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 8 changesets with 24 changes to 10 files
-  searching 8 changesets for largefiles
-  verified contents of 13 revisions of 6 largefiles
+  $ hg -R a-clone1 verify --large --lfa --lfc -q
   $ hg -R a-clone1 sum
   parent: 1:ce8896473775 
    edit files
@@ -1122,7 +1115,7 @@
   6 changesets found
   uncompressed size of bundle content:
       1389 (changelog)
-      1599 (manifests)
+      1698 (manifests)
        254  .hglf/large1
        564  .hglf/large3
        572  .hglf/sub/large4
@@ -1552,6 +1545,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 10 changesets with 28 changes to 10 files
   searching 1 changesets for largefiles
   verified existence of 3 revisions of 3 largefiles
@@ -1561,15 +1555,8 @@
 
   $ mv $TESTTMP/d/.hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928 .
   $ rm .hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928
-  $ hg verify --large
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 10 changesets with 28 changes to 10 files
-  searching 1 changesets for largefiles
+  $ hg verify --large -q
   changeset 9:598410d3eb9a: sub/large4 references missing $TESTTMP/d/.hg/largefiles/e166e74c7303192238d60af5a9c4ce9bef0b7928
-  verified existence of 3 revisions of 3 largefiles
   [1]
 
 - introduce corruption and make sure that it is caught when checking content:
--- a/tests/test-lfconvert.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-lfconvert.t	Thu Mar 02 22:45:44 2023 +0100
@@ -345,6 +345,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 8 changesets with 13 changes to 9 files
   searching 7 changesets for largefiles
   changeset 0:d4892ec57ce2: large references missing $TESTTMP/largefiles-repo-hg/.hg/largefiles/2e000fa7e85759c7f4c254d4d9c33ef481e459a7
--- a/tests/test-lfs-serve-access.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-lfs-serve-access.t	Thu Mar 02 22:45:44 2023 +0100
@@ -357,7 +357,7 @@
   $LOCALIP - - [$ERRDATE$] HG error:      super(badstore, self).download(oid, src, contentlength)
   $LOCALIP - - [$ERRDATE$] HG error:      raise LfsCorruptionError( (glob) (py38 !)
   $LOCALIP - - [$ERRDATE$] HG error:      _(b'corrupt remote lfs object: %s') % oid (glob) (no-py38 !)
-  $LOCALIP - - [$ERRDATE$] HG error:  hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (py3 !)
+  $LOCALIP - - [$ERRDATE$] HG error:  hgext.lfs.blobstore.LfsCorruptionError: corrupt remote lfs object: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c (glob)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
   $LOCALIP - - [$ERRDATE$] Exception happened during processing request '/.hg/lfs/objects/276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d': (glob)
   Traceback (most recent call last):
@@ -388,7 +388,7 @@
   $LOCALIP - - [$ERRDATE$] HG error:      blobstore._verify(oid, b'dummy content') (glob)
   $LOCALIP - - [$ERRDATE$] HG error:      raise LfsCorruptionError( (glob) (py38 !)
   $LOCALIP - - [$ERRDATE$] HG error:      hint=_(b'run hg verify'), (glob) (no-py38 !)
-  $LOCALIP - - [$ERRDATE$] HG error:  hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (py3 !)
+  $LOCALIP - - [$ERRDATE$] HG error:  hgext.lfs.blobstore.LfsCorruptionError: detected corrupt lfs object: 276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d (glob)
   $LOCALIP - - [$ERRDATE$] HG error:   (glob)
 
 Basic Authorization headers are returned by the Batch API, and sent back with
--- a/tests/test-lfs.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-lfs.t	Thu Mar 02 22:45:44 2023 +0100
@@ -787,8 +787,9 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
+  not checking dirstate because of previous errors
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -851,6 +852,7 @@
   checking files
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
   lfs blob sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e renamed large -> l
+  checking dirstate
   checked 5 changesets with 10 changes to 4 files
 
 Verify will not try to download lfs blobs, if told not to by the config option
@@ -865,6 +867,7 @@
   checking files
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
   lfs blob sha256:66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e renamed large -> l
+  checking dirstate
   checked 5 changesets with 10 changes to 4 files
 
 Verify will copy/link all lfs objects into the local store that aren't already
@@ -885,6 +888,7 @@
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: adding b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c to the usercache
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
+  checking dirstate
   checked 5 changesets with 10 changes to 4 files
 
 Verify will not copy/link a corrupted file from the usercache into the local
@@ -897,11 +901,12 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
+  not checking dirstate because of previous errors
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -917,6 +922,7 @@
   lfs: found 66100b384bf761271b407d79fc30cdd0554f3b2c5d944836e936d584b88ce88e in the local lfs store
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
+  checking dirstate
   checked 5 changesets with 10 changes to 4 files
 
 Damaging a file required by the update destination fails the update.
@@ -941,8 +947,9 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
+  not checking dirstate because of previous errors
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -967,11 +974,12 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
+  not checking dirstate because of previous errors
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -987,7 +995,7 @@
 Accessing a corrupt file will complain
 
   $ hg --cwd fromcorrupt2 cat -r 0 large
-  abort: integrity check failed on data/large:0
+  abort: integrity check failed on large:0
   [50]
 
 lfs -> normal -> lfs round trip conversions are possible.  The 'none()'
--- a/tests/test-manifest.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-manifest.t	Thu Mar 02 22:45:44 2023 +0100
@@ -246,12 +246,7 @@
 
   $ hg up -qC .
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 8 changes to 8 files
+  $ hg verify -q
 
   $ hg rollback -q --config ui.rollback=True
   $ hg rm b.txt d.txt
@@ -270,12 +265,7 @@
   ccc.txt\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
   e.txt\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 9 changes to 9 files
+  $ hg verify -q
   $ cd ..
 
 Test manifest cache interaction with shares
--- a/tests/test-merge-internal-tools-pattern.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-merge-internal-tools-pattern.t	Thu Mar 02 22:45:44 2023 +0100
@@ -140,3 +140,23 @@
   third line
   line 4b
   line 4a
+
+Merge using internal:union-other-first tool:
+
+  $ hg update -C 4
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+  $ echo "[merge-patterns]" > .hg/hgrc
+  $ echo "* = internal:union-other-first" >> .hg/hgrc
+
+  $ hg merge 3
+  merging f
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+
+  $ cat f
+  line 1
+  line 2
+  third line
+  line 4a
+  line 4b
--- a/tests/test-narrow-clone-stream.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-narrow-clone-stream.t	Thu Mar 02 22:45:44 2023 +0100
@@ -90,8 +90,6 @@
   requires
   undo
   undo.backupfiles
-  undo.narrowspec
-  undo.phaseroots
 
 Checking that the repository has all the required data and is not broken
 
@@ -101,4 +99,5 @@
   checking directory manifests (tree !)
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 40 changesets with 1 changes to 1 files
--- a/tests/test-narrow-exchange.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-narrow-exchange.t	Thu Mar 02 22:45:44 2023 +0100
@@ -164,12 +164,7 @@
   remote: adding file changes
   remote: added 4 changesets with 4 changes to 2 files
   $ cd ../master
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 8 changesets with 10 changes to 3 files
+  $ hg verify -q
 
 Can not push to wider repo if change affects paths in wider repo that are
 not also in narrower repo
@@ -218,8 +213,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
-  remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+  remote: error: pretxnchangegroup.lfs hook raised an exception: inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   remote: transaction abort! (lfs-on !)
   remote: rollback completed (lfs-on !)
-  remote: abort: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+  remote: abort: inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
--- a/tests/test-narrow-expanddirstate.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-narrow-expanddirstate.t	Thu Mar 02 22:45:44 2023 +0100
@@ -74,7 +74,7 @@
   >   narrowspec.copytoworkingcopy(repo)
   >   newmatcher = narrowspec.match(repo.root, includes, excludes)
   >   added = matchmod.differencematcher(newmatcher, currentmatcher)
-  >   with repo.dirstate.parentchange():
+  >   with repo.dirstate.changing_parents(repo):
   >       for f in repo[b'.'].manifest().walk(added):
   >           repo.dirstate.update_file(
   >               f,
--- a/tests/test-narrow-share.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-narrow-share.t	Thu Mar 02 22:45:44 2023 +0100
@@ -161,13 +161,7 @@
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd share-unshare
   $ hg unshare
-  $ hg verify
-  checking changesets
-  checking manifests
-  checking directory manifests (tree !)
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 3 changes to 3 files
+  $ hg verify -q
   $ cd ..
 
 Dirstate should be left alone when upgrading from version of hg that didn't support narrow+share
--- a/tests/test-narrow-widen-no-ellipsis.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-narrow-widen-no-ellipsis.t	Thu Mar 02 22:45:44 2023 +0100
@@ -274,13 +274,7 @@
   I path:d3
   I path:d6
   I path:d9
-  $ hg verify
-  checking changesets
-  checking manifests
-  checking directory manifests (tree !)
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 4 changes to 4 files
+  $ hg verify -q
   $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
   10: add d10/f
   9: add d9/f
@@ -321,13 +315,7 @@
 
 Verify shouldn't claim the repo is corrupt after a widen.
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  checking directory manifests (tree !)
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 5 changes to 5 files
+  $ hg verify -q
 
 Widening preserves parent of local commit
 
--- a/tests/test-narrow-widen.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-narrow-widen.t	Thu Mar 02 22:45:44 2023 +0100
@@ -280,13 +280,7 @@
   I path:d3
   I path:d6
   I path:d9
-  $ hg verify
-  checking changesets
-  checking manifests
-  checking directory manifests (tree !)
-  crosschecking files in changesets and manifests
-  checking files
-  checked 8 changesets with 4 changes to 4 files
+  $ hg verify -q
   $ hg l
   @  ...7: add d10/f
   |
@@ -340,13 +334,7 @@
 
 Verify shouldn't claim the repo is corrupt after a widen.
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  checking directory manifests (tree !)
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 5 changes to 5 files
+  $ hg verify -q
 
 Widening preserves parent of local commit
 
--- a/tests/test-notify.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-notify.t	Thu Mar 02 22:45:44 2023 +0100
@@ -467,7 +467,7 @@
   Content-Transfer-Encoding: 8bit
   X-Test: foo
   Date: * (glob)
-  Subject: =?utf-8?b?w6AuLi4=?= (py3 !)
+  Subject: =?utf-8?b?w6AuLi4=?=
   From: test@test.com
   X-Hg-Notification: changeset 0f25f9c22b4c
   Message-Id: <*> (glob)
--- a/tests/test-obsolete-changeset-exchange.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-obsolete-changeset-exchange.t	Thu Mar 02 22:45:44 2023 +0100
@@ -47,12 +47,7 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 2 files
-  $ hg -R ../other verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
+  $ hg -R ../other verify -q
 
 Adding a changeset going extinct locally
 ------------------------------------------
--- a/tests/test-patchbomb.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-patchbomb.t	Thu Mar 02 22:45:44 2023 +0100
@@ -550,7 +550,7 @@
   X-Mercurial-Series-Id: <f81ef97829467e868fc4.240@test-hostname>
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Thu, 01 Jan 1970 00:04:00 +0000
-  From: =?iso-8859-1?q?Q?= <quux> (py3 !)
+  From: =?iso-8859-1?q?Q?= <quux>
   To: foo
   Cc: bar
   
@@ -2435,9 +2435,9 @@
   User-Agent: Mercurial-patchbomb/* (glob)
   Date: Tue, 01 Jan 1980 00:01:00 +0000
   From: quux
-  To: =?iso-8859-1?q?spam?= <spam>, eggs, toast (py3 !)
-  Cc: foo, bar@example.com, =?iso-8859-1?q?A=2C_B_=3C=3E?= <a@example.com> (py3 !)
-  Bcc: =?iso-8859-1?q?Quux=2C_A=2E?= <quux> (py3 !)
+  To: =?iso-8859-1?q?spam?= <spam>, eggs, toast
+  Cc: foo, bar@example.com, =?iso-8859-1?q?A=2C_B_=3C=3E?= <a@example.com>
+  Bcc: =?iso-8859-1?q?Quux=2C_A=2E?= <quux>
   
   # HG changeset patch
   # User test
@@ -2754,7 +2754,7 @@
   MIME-Version: 1.0
   Content-Type: text/plain; charset="iso-8859-1"
   Content-Transfer-Encoding: quoted-printable
-  Subject: =?utf-8?b?W1BBVENIIDIgb2YgNl0gw6dh?= (py3 !)
+  Subject: =?utf-8?b?W1BBVENIIDIgb2YgNl0gw6dh?=
   X-Mercurial-Node: f81ef97829467e868fc405fccbcfa66217e4d3e6
   X-Mercurial-Series-Index: 2
   X-Mercurial-Series-Total: 6
--- a/tests/test-permissions.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-permissions.t	Thu Mar 02 22:45:44 2023 +0100
@@ -19,31 +19,17 @@
 
   $ hg commit -m "1"
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ chmod -r .hg/store/data/a.i
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
+  $ hg verify -q
   abort: Permission denied: '$TESTTMP/t/.hg/store/data/a.i'
   [255]
 
   $ chmod +r .hg/store/data/a.i
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ chmod -w .hg/store/data/a.i
 
--- a/tests/test-phases.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-phases.t	Thu Mar 02 22:45:44 2023 +0100
@@ -9,7 +9,7 @@
   > txnclose-phase.test = sh $TESTTMP/hook.sh
   > EOF
 
-  $ hglog() { hg log --template "{rev} {phaseidx} {desc}\n" $*; }
+  $ hglog() { hg log -G --template "{rev} {phaseidx} {desc}\n" $*; }
   $ mkcommit() {
   >    echo "$1" > "$1"
   >    hg add "$1"
@@ -36,7 +36,8 @@
 New commit are draft by default
 
   $ hglog
-  0 1 A
+  @  0 1 A
+  
 
 Following commit are draft too
 
@@ -45,8 +46,10 @@
   test-hook-close-phase: 27547f69f25460a52fff66ad004e58da7ad3fb56:   -> draft
 
   $ hglog
-  1 1 B
-  0 1 A
+  @  1 1 B
+  |
+  o  0 1 A
+  
 
 Working directory phase is secret when its parent is secret.
 
@@ -103,8 +106,10 @@
   $ hg phase
   1: public
   $ hglog
-  1 0 B
-  0 0 A
+  @  1 0 B
+  |
+  o  0 0 A
+  
 
   $ mkcommit C
   test-debug-phase: new rev 2:  x -> 1
@@ -114,10 +119,14 @@
   test-hook-close-phase: b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e:   -> draft
 
   $ hglog
-  3 1 D
-  2 1 C
-  1 0 B
-  0 0 A
+  @  3 1 D
+  |
+  o  2 1 C
+  |
+  o  1 0 B
+  |
+  o  0 0 A
+  
 
 Test creating changeset as secret
 
@@ -125,11 +134,16 @@
   test-debug-phase: new rev 4:  x -> 2
   test-hook-close-phase: a603bfb5a83e312131cebcd05353c217d4d21dde:   -> secret
   $ hglog
-  4 2 E
-  3 1 D
-  2 1 C
-  1 0 B
-  0 0 A
+  @  4 2 E
+  |
+  o  3 1 D
+  |
+  o  2 1 C
+  |
+  o  1 0 B
+  |
+  o  0 0 A
+  
 
 Test the secret property is inherited
 
@@ -137,12 +151,18 @@
   test-debug-phase: new rev 5:  x -> 2
   test-hook-close-phase: a030c6be5127abc010fcbff1851536552e6951a8:   -> secret
   $ hglog
-  5 2 H
-  4 2 E
-  3 1 D
-  2 1 C
-  1 0 B
-  0 0 A
+  @  5 2 H
+  |
+  o  4 2 E
+  |
+  o  3 1 D
+  |
+  o  2 1 C
+  |
+  o  1 0 B
+  |
+  o  0 0 A
+  
 
 Even on merge
 
@@ -152,13 +172,20 @@
   created new head
   test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519:   -> draft
   $ hglog
-  6 1 B'
-  5 2 H
-  4 2 E
-  3 1 D
-  2 1 C
-  1 0 B
-  0 0 A
+  @  6 1 B'
+  |
+  | o  5 2 H
+  | |
+  | o  4 2 E
+  | |
+  | o  3 1 D
+  | |
+  | o  2 1 C
+  |/
+  o  1 0 B
+  |
+  o  0 0 A
+  
   $ hg merge 4 # E
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
@@ -170,14 +197,22 @@
   test-hook-close-phase: 17a481b3bccb796c0521ae97903d81c52bfee4af:   -> secret
 
   $ hglog
-  7 2 merge B' and E
-  6 1 B'
-  5 2 H
-  4 2 E
-  3 1 D
-  2 1 C
-  1 0 B
-  0 0 A
+  @    7 2 merge B' and E
+  |\
+  | o  6 1 B'
+  | |
+  +---o  5 2 H
+  | |
+  o |  4 2 E
+  | |
+  o |  3 1 D
+  | |
+  o |  2 1 C
+  |/
+  o  1 0 B
+  |
+  o  0 0 A
+  
 
 Test secret changeset are not pushed
 
@@ -221,21 +256,34 @@
   test-hook-close-phase: b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e:   -> draft
   test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519:   -> draft
   $ hglog
-  7 2 merge B' and E
-  6 1 B'
-  5 2 H
-  4 2 E
-  3 1 D
-  2 1 C
-  1 0 B
-  0 0 A
+  @    7 2 merge B' and E
+  |\
+  | o  6 1 B'
+  | |
+  +---o  5 2 H
+  | |
+  o |  4 2 E
+  | |
+  o |  3 1 D
+  | |
+  o |  2 1 C
+  |/
+  o  1 0 B
+  |
+  o  0 0 A
+  
   $ cd ../push-dest
   $ hglog
-  4 1 B'
-  3 1 D
-  2 1 C
-  1 0 B
-  0 0 A
+  o  4 1 B'
+  |
+  | o  3 1 D
+  | |
+  | o  2 1 C
+  |/
+  o  1 0 B
+  |
+  o  0 0 A
+  
 
 (Issue3303)
 Check that remote secret changesets are ignored when checking creation of remote heads
@@ -328,11 +376,16 @@
   test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519:   -> public
   (run 'hg heads' to see heads, 'hg merge' to merge)
   $ hglog
-  4 0 B'
-  3 0 D
-  2 0 C
-  1 0 B
-  0 0 A
+  o  4 0 B'
+  |
+  | o  3 0 D
+  | |
+  | o  2 0 C
+  |/
+  o  1 0 B
+  |
+  o  0 0 A
+  
   $ cd ..
 
 But secret can still be bundled explicitly
@@ -357,11 +410,16 @@
   test-hook-close-phase: b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e:   -> public
   test-hook-close-phase: cf9fe039dfd67e829edf6522a45de057b5c86519:   -> public
   $ hglog -R clone-dest
-  4 0 B'
-  3 0 D
-  2 0 C
-  1 0 B
-  0 0 A
+  o  4 0 B'
+  |
+  | o  3 0 D
+  | |
+  | o  2 0 C
+  |/
+  o  1 0 B
+  |
+  o  0 0 A
+  
 
 Test summary
 
@@ -385,16 +443,28 @@
 
   $ cd initialrepo
   $ hglog -r 'public()'
-  0 0 A
-  1 0 B
+  o  1 0 B
+  |
+  o  0 0 A
+  
   $ hglog -r 'draft()'
-  2 1 C
-  3 1 D
-  6 1 B'
+  o  6 1 B'
+  |
+  ~
+  o  3 1 D
+  |
+  o  2 1 C
+  |
+  ~
   $ hglog -r 'secret()'
-  4 2 E
-  5 2 H
-  7 2 merge B' and E
+  @    7 2 merge B' and E
+  |\
+  | ~
+  | o  5 2 H
+  |/
+  o  4 2 E
+  |
+  ~
 
 test that phases are displayed in log at debug level
 
@@ -730,12 +800,7 @@
 because repo.cancopy() is False
 
   $ cd ../initialrepo
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 8 changesets with 7 changes to 7 files
+  $ hg verify -q
 
   $ cd ..
 
@@ -753,8 +818,6 @@
   $ hg phase 6
   6: draft
   $ hg --config hooks.pretxnclose="sh $TESTTMP/savepending.sh" phase -f -s 6
-  transaction abort!
-  rollback completed
   abort: pretxnclose hook exited with status 1
   [40]
   $ cp .hg/store/phaseroots.pending.saved .hg/store/phaseroots.pending
@@ -776,8 +839,6 @@
   7: secret
   @push-dest
   6: draft
-  transaction abort!
-  rollback completed
   abort: pretxnclose hook exited with status 1
   [40]
 
@@ -850,13 +911,9 @@
 Try various actions. Only the draft move should succeed
 
   $ hg phase --public b3325c91a4d9
-  transaction abort!
-  rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
   [40]
   $ hg phase --public a603bfb5a83e
-  transaction abort!
-  rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
   [40]
   $ hg phase --draft 17a481b3bccb
@@ -867,8 +924,6 @@
   test-hook-close-phase: a603bfb5a83e312131cebcd05353c217d4d21dde:  secret -> draft
   test-hook-close-phase: 17a481b3bccb796c0521ae97903d81c52bfee4af:  secret -> draft
   $ hg phase --public 17a481b3bccb
-  transaction abort!
-  rollback completed
   abort: pretxnclose-phase.nopublish_D hook exited with status 1
   [40]
 
@@ -1047,3 +1102,30 @@
   $ hg up tip
   2 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ cd ..
+
+Testing that command line flags override configuration
+
+  $ hg init commit-overrides
+  $ cd commit-overrides
+
+`hg commit --draft` overrides new-commit=secret
+
+  $ mkcommit A --config phases.new-commit='secret' --draft
+  test-debug-phase: new rev 0:  x -> 1
+  test-hook-close-phase: 4a2df7238c3b48766b5e22fafbb8a2f506ec8256:   -> draft
+  $ hglog
+  @  0 1 A
+  
+
+`hg commit --secret` overrides new-commit=draft
+
+  $ mkcommit B --config phases.new-commit='draft' --secret
+  test-debug-phase: new rev 1:  x -> 2
+  test-hook-close-phase: 27547f69f25460a52fff66ad004e58da7ad3fb56:   -> secret
+  $ hglog
+  @  1 2 B
+  |
+  o  0 1 A
+  
+
+  $ cd ..
--- a/tests/test-pull-bundle.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-pull-bundle.t	Thu Mar 02 22:45:44 2023 +0100
@@ -33,8 +33,6 @@
 
   $ cd repo
   $ cat <<EOF > .hg/hgrc
-  > [server]
-  > pullbundle = True
   > [experimental]
   > evolution = True
   > [extensions]
--- a/tests/test-pull-network.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-pull-network.t	Thu Mar 02 22:45:44 2023 +0100
@@ -8,12 +8,7 @@
   adding foo
   $ hg commit -m 1
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ hg serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
@@ -30,12 +25,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ cd copy
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ hg co
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-pull-permission.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-pull-permission.t	Thu Mar 02 22:45:44 2023 +0100
@@ -23,11 +23,6 @@
   $ chmod +w a/.hg/store # let test clean up
 
   $ cd b
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ cd ..
--- a/tests/test-pull-pull-corruption.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-pull-pull-corruption.t	Thu Mar 02 22:45:44 2023 +0100
@@ -65,11 +65,6 @@
 see the result
 
   $ wait
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 11 changes to 1 files
+  $ hg verify -q
 
   $ cd ..
--- a/tests/test-push.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-push.t	Thu Mar 02 22:45:44 2023 +0100
@@ -18,7 +18,7 @@
   >    echo
   >    hg init test-revflag-"$i"
   >    hg -R test-revflag push -r "$i" test-revflag-"$i"
-  >    hg -R test-revflag-"$i" verify
+  >    hg -R test-revflag-"$i" verify -q
   > done
   
   pushing to test-revflag-0
@@ -27,11 +27,6 @@
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
   
   pushing to test-revflag-1
   searching for changes
@@ -39,11 +34,6 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   
   pushing to test-revflag-2
   searching for changes
@@ -51,11 +41,6 @@
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   
   pushing to test-revflag-3
   searching for changes
@@ -63,11 +48,6 @@
   adding manifests
   adding file changes
   added 4 changesets with 4 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 4 changes to 1 files
   
   pushing to test-revflag-4
   searching for changes
@@ -75,11 +55,6 @@
   adding manifests
   adding file changes
   added 2 changesets with 2 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   
   pushing to test-revflag-5
   searching for changes
@@ -87,11 +62,6 @@
   adding manifests
   adding file changes
   added 3 changesets with 3 changes to 1 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   
   pushing to test-revflag-6
   searching for changes
@@ -99,11 +69,6 @@
   adding manifests
   adding file changes
   added 4 changesets with 5 changes to 2 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 5 changes to 2 files
   
   pushing to test-revflag-7
   searching for changes
@@ -111,11 +76,6 @@
   adding manifests
   adding file changes
   added 5 changesets with 6 changes to 3 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 6 changes to 3 files
   
   pushing to test-revflag-8
   searching for changes
@@ -123,11 +83,6 @@
   adding manifests
   adding file changes
   added 5 changesets with 5 changes to 2 files
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
 
   $ cd test-revflag-8
 
@@ -141,12 +96,7 @@
   new changesets c70afb1ee985:faa2e4234c7a
   (run 'hg heads' to see heads, 'hg merge' to merge)
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
 
   $ cd ..
 
@@ -189,13 +139,9 @@
 
 Expected to fail:
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
+  $ hg verify -q
    beta@1: dddc47b3ba30 not in manifests
-  checked 2 changesets with 4 changes to 2 files
+  not checking dirstate because of previous errors
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
@@ -224,13 +170,9 @@
 
 Expected to fail:
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
+  $ hg verify -q
    beta@1: manifest refers to unknown revision dddc47b3ba30
-  checked 2 changesets with 2 changes to 2 files
+  not checking dirstate because of previous errors
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
--- a/tests/test-qrecord.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-qrecord.t	Thu Mar 02 22:45:44 2023 +0100
@@ -68,6 +68,7 @@
       --close-branch        mark a branch head as closed
       --amend               amend the parent of the working directory
    -s --secret              use the secret phase for committing
+      --draft               use the draft phase for committing
    -e --edit                invoke editor on commit messages
    -I --include PATTERN [+] include names matching the given patterns
    -X --exclude PATTERN [+] exclude names matching the given patterns
--- a/tests/test-racy-mutations.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-racy-mutations.t	Thu Mar 02 22:45:44 2023 +0100
@@ -6,8 +6,12 @@
 - something (that doesn't respect the lock file) writing to the .hg directory
 while we're running
 
-  $ hg init a
-  $ cd a
+
+Initial setup
+-------------
+
+  $ hg init base-repo
+  $ cd base-repo
 
   $ cat > "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh" <<EOF
   >     [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
@@ -26,46 +30,63 @@
   $ echo r0 > r0
   $ hg commit -qAm 'r0'
 
+  $ cd ..
+  $ cp -R base-repo main-client
+  $ cp -R base-repo racing-client
+
+  $ mkdir sync
+  $ EDITOR_STARTED="$TESTTMP_FORWARD_SLASH/sync/.editor_started"
+  $ MISCHIEF_MANAGED="$TESTTMP_FORWARD_SLASH/sync/.mischief_managed"
+  $ JOBS_FINISHED="$TESTTMP_FORWARD_SLASH/sync/.jobs_finished"
+
+Actual test
+-----------
+
 Start an hg commit that will take a while
-  $ EDITOR_STARTED="$TESTTMP_FORWARD_SLASH/a/.editor_started"
-  $ MISCHIEF_MANAGED="$TESTTMP_FORWARD_SLASH/a/.mischief_managed"
-  $ JOBS_FINISHED="$TESTTMP_FORWARD_SLASH/a/.jobs_finished"
+
+  $ cd main-client
 
 #if fail-if-detected
-  $ cat >> .hg/hgrc << EOF
+  $ cat >> $HGRCPATH << EOF
   > [debug]
   > revlog.verifyposition.changelog = fail
   > EOF
 #endif
 
-  $ cat >> .hg/hgrc << EOF
-  > [ui]
-  > editor=sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh
-  > EOF
-
   $ echo foo > foo
-  $ (unset HGEDITOR;
-  >      WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
-  >      WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
-  >           hg commit -qAm 'r1 (foo)' --edit foo > .foo_commit_out 2>&1 ; touch "${JOBS_FINISHED}") &
+  $ (
+  >    unset HGEDITOR;
+  >    WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
+  >    WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
+  >    hg commit -qAm 'r1 (foo)' --edit foo \
+  >    --config ui.editor="sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh" \
+  >    > .foo_commit_out 2>&1 ;\
+  >    touch "${JOBS_FINISHED}"
+  > ) &
 
 Wait for the "editor" to actually start
   $ sh "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" 5 "${EDITOR_STARTED}"
 
-  $ cat >> .hg/hgrc << EOF
-  > [ui]
-  > editor=
-  > EOF
 
-Break the locks, and make another commit.
-  $ hg debuglocks -LW
+Do a concurrent edit
+  $ cd ../racing-client
+  $ touch ../pre-race
+  $ sleep 1
   $ echo bar > bar
-  $ hg commit -qAm 'r2 (bar)' bar
-  $ hg debugrevlogindex -c
+  $ hg --repository ../racing-client commit -qAm 'r2 (bar)' bar
+  $ hg --repository ../racing-client debugrevlogindex -c
      rev linkrev nodeid       p1           p2
        0       0 222799e2f90b 000000000000 000000000000
        1       1 6f124f6007a0 222799e2f90b 000000000000
 
+We simulate a network FS race by overwriting the raced repo content with the
+new content of the files changed in the racing repository.
+
+  $ for x in `find . -type f -newer ../pre-race`; do
+  >    cp $x ../main-client/$x
+  > done
+  $ cd ../main-client
+
 Awaken the editor from that first commit
   $ touch "${MISCHIEF_MANAGED}"
 And wait for it to finish
@@ -85,10 +106,10 @@
 
 #if fail-if-detected
   $ cat .foo_commit_out
+  note: commit message saved in .hg/last-message.txt
+  note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
   transaction abort!
   rollback completed
-  note: commit message saved in .hg/last-message.txt
-  note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
   abort: 00changelog.i: file cursor at position 249, expected 121
 And no corruption in the changelog.
   $ hg debugrevlogindex -c
--- a/tests/test-rebase-abort.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-rebase-abort.t	Thu Mar 02 22:45:44 2023 +0100
@@ -393,7 +393,6 @@
   .hg/merge/state
   .hg/rebasestate
   .hg/undo.backup.dirstate
-  .hg/undo.dirstate
   .hg/updatestate
 
   $ hg rebase -s 3 -d tip
--- a/tests/test-rebase-conflicts.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-rebase-conflicts.t	Thu Mar 02 22:45:44 2023 +0100
@@ -315,7 +315,7 @@
   adding manifests
   adding file changes
   adding f1.txt revisions
-  bundle2-input-part: total payload size 1686
+  bundle2-input-part: total payload size 1739
   bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
   bundle2-input-part: total payload size 74
   bundle2-input-part: "phase-heads" supported
--- a/tests/test-rebase-transaction.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-rebase-transaction.t	Thu Mar 02 22:45:44 2023 +0100
@@ -168,8 +168,6 @@
   rebasing 1:112478962961 B "B"
   rebasing 3:26805aba1e60 C "C"
   rebasing 5:f585351a92f8 D tip "D"
-  transaction abort!
-  rollback completed
   abort: edit failed: false exited with status 1
   [250]
   $ hg tglog
--- a/tests/test-rebuildstate.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-rebuildstate.t	Thu Mar 02 22:45:44 2023 +0100
@@ -17,7 +17,7 @@
   >   try:
   >     for file in pats:
   >       if opts.get('normal_lookup'):
-  >         with repo.dirstate.parentchange():
+  >         with repo.dirstate.changing_parents(repo):
   >             repo.dirstate.update_file(
   >                 file,
   >                 p1_tracked=True,
--- a/tests/test-record.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-record.t	Thu Mar 02 22:45:44 2023 +0100
@@ -51,6 +51,7 @@
       --close-branch        mark a branch head as closed
       --amend               amend the parent of the working directory
    -s --secret              use the secret phase for committing
+      --draft               use the draft phase for committing
    -e --edit                invoke editor on commit messages
    -I --include PATTERN [+] include names matching the given patterns
    -X --exclude PATTERN [+] exclude names matching the given patterns
--- a/tests/test-remotefilelog-corrupt-cache.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-remotefilelog-corrupt-cache.t	Thu Mar 02 22:45:44 2023 +0100
@@ -38,7 +38,7 @@
   $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
   $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
   $ hg up tip 2>&1 | egrep "^[^ ].*unexpected remotefilelog"
-  hgext.remotefilelog.shallowutil.BadRemotefilelogHeader: unexpected remotefilelog header: illegal format (py3 !)
+  abort: unexpected remotefilelog header: illegal format
 
 Verify detection and remediation when remotefilelog.validatecachelog is set
 
--- a/tests/test-repair-strip.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-repair-strip.t	Thu Mar 02 22:45:44 2023 +0100
@@ -66,6 +66,7 @@
    (expected 1)
    b@?: 736c29771fba not in manifests
   warning: orphan data file 'data/c.i'
+  not checking dirstate because of previous errors
   checked 2 changesets with 3 changes to 2 files
   2 warnings encountered!
   2 integrity errors encountered!
@@ -79,6 +80,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 2 changesets with 2 changes to 2 files
   $ teststrip 0 2 r .hg/store/data/b.i
   % before update 0, strip 2
@@ -93,6 +95,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 4 changesets with 4 changes to 3 files
   % journal contents
   (no journal)
@@ -124,6 +127,7 @@
    b@?: rev 1 points to nonexistent changeset 2
    (expected 1)
    c@?: rev 0 points to nonexistent changeset 3
+  not checking dirstate because of previous errors
   checked 2 changesets with 4 changes to 3 files
   1 warnings encountered!
   7 integrity errors encountered!
@@ -138,6 +142,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 2 changesets with 2 changes to 2 files
 
   $ cd ..
--- a/tests/test-revlog-ancestry.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-revlog-ancestry.py	Thu Mar 02 22:45:44 2023 +0100
@@ -19,8 +19,10 @@
     f = open(name, 'wb')
     f.write(b'%s\n' % name)
     f.close()
-    repo[None].add([name])
-    commit(name, time)
+    with repo.wlock():
+        with repo.dirstate.changing_files(repo):
+            repo[None].add([name])
+        commit(name, time)
 
 
 def update(rev):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-revlog-delta-find.t	Thu Mar 02 22:45:44 2023 +0100
@@ -0,0 +1,333 @@
+==========================================================
+Test various things around delta computation within revlog
+==========================================================
+
+
+basic setup
+-----------
+
+  $ cat << EOF >> $HGRCPATH
+  > [debug]
+  > revlog.debug-delta=yes
+  > EOF
+  $ cat << EOF >> sha256line.py
+  > # a way to quickly produce a file of significant size and poorly compressible content.
+  > import hashlib
+  > import sys
+  > for line in sys.stdin:
+  >     print(hashlib.sha256(line.encode('utf8')).hexdigest())
+  > EOF
+
+  $ hg init base-repo
+  $ cd base-repo
+
+create a "large" file
+
+  $ $TESTDIR/seq.py 1000 | $PYTHON $TESTTMP/sha256line.py > my-file.txt
+  $ hg add my-file.txt
+  $ hg commit -m initial-commit
+  DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+
+Add more changes at the end of the file
+
+  $ $TESTDIR/seq.py 1001 1200 | $PYTHON $TESTTMP/sha256line.py >> my-file.txt
+  $ hg commit -m "large-change"
+  DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+
+Add a small change at the start
+
+  $ hg up 'desc("initial-commit")' --quiet
+  $ mv my-file.txt foo
+  $ echo "small change at the start" > my-file.txt
+  $ cat foo >> my-file.txt
+  $ rm foo
+  $ hg commit -m "small-change"
+  DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  created new head
+
+
+  $ hg log -r 'head()' -T '{node}\n' >> ../base-heads.nodes
+  $ hg log -r 'desc("initial-commit")' -T '{node}\n' >> ../initial.node
+  $ hg log -r 'desc("small-change")' -T '{node}\n' >> ../small.node
+  $ hg log -r 'desc("large-change")' -T '{node}\n' >> ../large.node
+  $ cd ..
+
+Check delta-find policy and result for merge on commit
+======================================================
+
+Check that the delta of a merge picks the best of the two parents
+------------------------------------------------------------------
+
+As we check against both parents, the one with the largest change should
+produce the smallest delta and be picked.
+
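+(A rough sketch of that intuition, outside the test itself: the merge result
+contains both changes, so a delta against the parent that already carries the
+large change only needs to re-encode the small one. The helper below is a
+hypothetical, line-based approximation, not the real revlog delta code; it is
+written to a file purely for illustration and never executed:)
+
+  $ cat << 'EOF' > delta-intuition.py
+  > # Hypothetical sketch: approximate the delta size by the number of lines
+  > # that must be inserted on top of the base to rebuild the target, then
+  > # pick the parent whose delta is smaller, as the search above does.
+  > import difflib
+  >
+  > def delta_size(base, target):
+  >     sm = difflib.SequenceMatcher(a=base, b=target)
+  >     return sum(j2 - j1 for tag, i1, i2, j1, j2 in sm.get_opcodes()
+  >                if tag in ('insert', 'replace'))
+  >
+  > def best_base(p1, p2, merged):
+  >     return 'p1' if delta_size(p1, merged) <= delta_size(p2, merged) else 'p2'
+  > EOF
+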
+  $ hg clone base-repo test-parents --quiet
+  $ hg -R test-parents update 'nodefromfile("small.node")' --quiet
+  $ hg -R test-parents merge 'nodefromfile("large.node")' --quiet
+
+The delta base is the "large" revision as it produces a smaller delta.
+
+  $ hg -R test-parents commit -m "merge from small change"
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=1 * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+
+Check that the behavior tested above can be disabled
+----------------------------------------------------
+
+We disable the checking of both parents at the same time. The `small` change,
+which produces a less optimal delta, should be picked first as it is "closer"
+to the new commit.
+
+  $ hg clone base-repo test-no-parents --quiet
+  $ hg -R test-no-parents update 'nodefromfile("small.node")' --quiet
+  $ hg -R test-no-parents merge 'nodefromfile("large.node")' --quiet
+
+The delta base is now the "small" revision, the closest parent, even though
+it produces a larger delta.
+
+  $ hg -R test-no-parents commit -m "merge from small change" \
+  > --config storage.revlog.optimize-delta-parent-choice=no
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+
+
+Check delta-find policy and result when unbundling
+==================================================
+
+Build a bundle with all delta built against p1
+
+  $ hg bundle -R test-parents --all --config devel.bundle.delta=p1 all-p1.hg
+  4 changesets found
+
+Default policy of trusting delta from the bundle
+------------------------------------------------
+
+Keeping the `p1` delta used in the bundle is sub-optimal for storage, but
+trusting the in-bundle delta is faster to apply.
+
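+(For reference, the same trade-off can be pinned in a configuration file; a
+minimal sketch of the option exercised below, written out purely as an
+illustration and not used by this run:)
+
+  $ cat << EOF > example-hgrc-snippet
+  > [storage]
+  > # keep deltas as they arrive in bundles: faster to apply, though the
+  > # resulting store may be larger than with locally recomputed deltas
+  > revlog.reuse-external-delta-parent = yes
+  > EOF
+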
+  $ hg init bundle-default
+  $ hg -R bundle-default unbundle all-p1.hg --quiet
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+
+(confirm the file revisions are in the same order, 2 should be smaller than 1)
+
+  $ hg -R bundle-default debugdata my-file.txt 2 | wc -l
+  \s*1001 (re)
+  $ hg -R bundle-default debugdata my-file.txt 1 | wc -l
+  \s*1200 (re)
+
+explicitly enabled
+------------------
+
+Keeping the `p1` delta used in the bundle is sub-optimal for storage, but
+trusting the in-bundle delta is faster to apply.
+
+  $ hg init bundle-reuse-enabled
+  $ hg -R bundle-reuse-enabled unbundle all-p1.hg --quiet \
+  > --config storage.revlog.reuse-external-delta-parent=yes
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+
+(confirm the file revisions are in the same order, 2 should be smaller than 1)
+
+  $ hg -R bundle-reuse-enabled debugdata my-file.txt 2 | wc -l
+  \s*1001 (re)
+  $ hg -R bundle-reuse-enabled debugdata my-file.txt 1 | wc -l
+  \s*1200 (re)
+
+explicitly disabled
+-------------------
+
+Not reusing the delta-base from the bundle means the delta will be made
+against the "best" parent (so not the same as in the previous two runs).
+
+  $ hg init bundle-reuse-disabled
+  $ hg -R bundle-reuse-disabled unbundle all-p1.hg --quiet \
+  > --config storage.revlog.reuse-external-delta-parent=no
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=1 * (glob)
+
+(confirm the file revisions are in the same order, 2 should be smaller than 1)
+
+  $ hg -R bundle-reuse-disabled debugdata my-file.txt 2 | wc -l
+  \s*1001 (re)
+  $ hg -R bundle-reuse-disabled debugdata my-file.txt 1 | wc -l
+  \s*1200 (re)
+
+
+Check the path.*:pulled-delta-reuse-policy option
+=================================================
+
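+The policy can also be set per path in an hgrc; a hypothetical sketch using
+the policy values exercised below (the remote URL is made up and this snippet
+is not used by the test):
+
+  $ cat << EOF > example-paths-hgrc-snippet
+  > [paths]
+  > # one of: default, no-reuse, try-base, forced
+  > default = http://example.com/some-repo
+  > default:pulled-delta-reuse-policy = no-reuse
+  > EOF
+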
+Get a repository with the bad parent picked and a clone ready to pull the merge
+
+  $ cp -ar bundle-reuse-enabled peer-bad-delta
+  $ hg clone peer-bad-delta local-pre-pull --rev `cat large.node` --rev `cat small.node` --quiet
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=0: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=1: delta-base=0 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=2: delta-base=0 * (glob)
+
+Check the parent order for the file
+
+  $ hg -R local-pre-pull debugdata my-file.txt 2 | wc -l
+  \s*1001 (re)
+  $ hg -R local-pre-pull debugdata my-file.txt 1 | wc -l
+  \s*1200 (re)
+
+Pull with no value (so the default)
+-----------------------------------
+
+default is to reuse the (bad) delta
+
+  $ cp -ar local-pre-pull local-no-value
+  $ hg -R local-no-value pull --quiet
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+
+Pull with explicitly the default
+--------------------------------
+
+default is to reuse the (bad) delta
+
+  $ cp -ar local-pre-pull local-default
+  $ hg -R local-default pull --quiet --config 'paths.default:pulled-delta-reuse-policy=default'
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+
+Pull with no-reuse
+------------------
+
+We don't reuse the base, so we get a better delta
+
+  $ cp -ar local-pre-pull local-no-reuse
+  $ hg -R local-no-reuse pull --quiet --config 'paths.default:pulled-delta-reuse-policy=no-reuse'
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=1 * (glob)
+
+Pull with try-base
+------------------
+
+We requested to use the (bad) delta
+
+  $ cp -ar local-pre-pull local-try-base
+  $ hg -R local-try-base pull --quiet --config 'paths.default:pulled-delta-reuse-policy=try-base'
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+
+Case where we force a "bad" delta to be applied
+===============================================
+
+We build a very different file content to force a full snapshot
+
+  $ cp -ar peer-bad-delta peer-bad-delta-with-full
+  $ cp -ar local-pre-pull local-pre-pull-full
+  $ echo '[paths]' >> local-pre-pull-full/.hg/hgrc
+  $ echo 'default=../peer-bad-delta-with-full' >> local-pre-pull-full/.hg/hgrc
+
+  $ hg -R peer-bad-delta-with-full update 'desc("merge")' --quiet
+  $ ($TESTDIR/seq.py 2000 2100; $TESTDIR/seq.py 500 510; $TESTDIR/seq.py 3000 3050) \
+  > | $PYTHON $TESTTMP/sha256line.py > peer-bad-delta-with-full/my-file.txt
+  $ hg -R peer-bad-delta-with-full commit -m 'trigger-full'
+  DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+
+Check that "try-base" behavior challenge the delta
+--------------------------------------------------
+
+The bundling process creates a delta against the previous revision; however,
+this is an invalid chain for the client, so it is not considered and we do a
+full snapshot again.
+
+  $ cp -ar local-pre-pull-full local-try-base-full
+  $ hg -R local-try-base-full pull --quiet \
+  > --config 'paths.default:pulled-delta-reuse-policy=try-base'
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 * (glob)
+
+Check that "forced" behavior do not challenge the delta, even if it is full.
+---------------------------------------------------------------------------
+
+A full snapshot in the bundle should be accepted as-is, without recomputation
+
+  $ cp -ar local-pre-pull-full local-forced-full
+  $ hg -R local-forced-full pull --quiet \
+  > --config 'paths.default:pulled-delta-reuse-policy=forced'
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 is-cached=1 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - * (glob)
+
+Check that "forced" behavior do not challenge the delta, even if it is bad.
+---------------------------------------------------------------------------
+
+The client does not challenge anything and applies the bizarre delta directly.
+
+Note: If the bundling process becomes smarter, this test might no longer work
+(as the server won't be sending "bad" deltas anymore) and might need something
+more subtle to test this behavior.
+
+  $ hg bundle -R peer-bad-delta-with-full --all --config devel.bundle.delta=p1 all-p1.hg
+  5 changesets found
+  $ cp -ar local-pre-pull-full local-forced-full-p1
+  $ hg -R local-forced-full-p1 pull --quiet \
+  > --config 'paths.*:pulled-delta-reuse-policy=forced' all-p1.hg
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 * (glob)
--- a/tests/test-revlog-raw.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-revlog-raw.py	Thu Mar 02 22:45:44 2023 +0100
@@ -1,7 +1,6 @@
 # test revlog interaction about raw data (flagprocessor)
 
 
-import collections
 import hashlib
 import sys
 
@@ -54,10 +53,6 @@
     b'sparse-revlog': True,
 }
 
-# The test wants to control whether to use delta explicitly, based on
-# "storedeltachains".
-revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
-
 
 def abort(msg):
     print('abort: %s' % msg)
@@ -471,21 +466,21 @@
         print('  got:      %s' % result)
 
 
-snapshotmapall = {0: [6, 8, 11, 17, 19, 25], 8: [21], -1: [0, 30]}
-snapshotmap15 = {0: [17, 19, 25], 8: [21], -1: [30]}
+snapshotmapall = {0: {6, 8, 11, 17, 19, 25}, 8: {21}, -1: {0, 30}}
+snapshotmap15 = {0: {17, 19, 25}, 8: {21}, -1: {30}}
 
 
 def findsnapshottest(rlog):
-    resultall = collections.defaultdict(list)
-    deltas._findsnapshots(rlog, resultall, 0)
-    resultall = dict(resultall.items())
+    cache = deltas.SnapshotCache()
+    cache.update(rlog)
+    resultall = dict(cache.snapshots)
     if resultall != snapshotmapall:
         print('snapshot map  differ:')
         print('  expected: %s' % snapshotmapall)
         print('  got:      %s' % resultall)
-    result15 = collections.defaultdict(list)
-    deltas._findsnapshots(rlog, result15, 15)
-    result15 = dict(result15.items())
+    cache15 = deltas.SnapshotCache()
+    cache15.update(rlog, 15)
+    result15 = dict(cache15.snapshots)
     if result15 != snapshotmap15:
         print('snapshot map  differ:')
         print('  expected: %s' % snapshotmap15)
--- a/tests/test-revlog-v2.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-revlog-v2.t	Thu Mar 02 22:45:44 2023 +0100
@@ -117,16 +117,6 @@
 hg verify should be happy
 -------------------------
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
-  $ hg verify -R ../cloned-repo
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -R ../cloned-repo -q
--- a/tests/test-rhg-sparse-narrow.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-rhg-sparse-narrow.t	Thu Mar 02 22:45:44 2023 +0100
@@ -75,16 +75,25 @@
   $ "$real_hg" cat -r "$tip" hide
   [1]
 
-A naive implementation of [rhg files] leaks the paths that are supposed to be
-hidden by narrow, so we just fall back to hg.
+A naive implementation of `rhg files` would leak the paths that are supposed
+to be hidden by narrow.
 
   $ $NO_FALLBACK rhg files -r "$tip"
-  unsupported feature: rhg files -r <rev> is not supported in narrow clones
-  [252]
+  dir1/x
+  dir1/y
   $ "$real_hg" files -r "$tip"
   dir1/x
   dir1/y
 
+The working copy version works with narrow correctly
+
+  $ $NO_FALLBACK rhg files
+  dir1/x
+  dir1/y
+  $ "$real_hg" files
+  dir1/x
+  dir1/y
+
 Hg status needs to do some filtering based on narrow spec
 
   $ mkdir dir2
@@ -96,12 +105,7 @@
 
   $ (cd ..; cp repo-sparse/.hg/store/data/hide.i repo-narrow/.hg/store/data/hide.i)
   $ (cd ..; mkdir repo-narrow/.hg/store/data/dir2; cp repo-sparse/.hg/store/data/dir2/z.i repo-narrow/.hg/store/data/dir2/z.i)
-  $ "$real_hg" verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 2 changes to 2 files
+  $ "$real_hg" verify -q
 
   $ "$real_hg" files -r "$tip"
   dir1/x
--- a/tests/test-rhg.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-rhg.t	Thu Mar 02 22:45:44 2023 +0100
@@ -4,12 +4,11 @@
 
 Unimplemented command
   $ $NO_FALLBACK rhg unimplemented-command
-  unsupported feature: error: Found argument 'unimplemented-command' which wasn't expected, or isn't valid in this context
+  unsupported feature: error: The subcommand 'unimplemented-command' wasn't recognized
   
-  USAGE:
-      rhg [OPTIONS] <SUBCOMMAND>
+  Usage: rhg [OPTIONS] <COMMAND>
   
-  For more information try --help
+  For more information try '--help'
   
   [252]
   $ rhg unimplemented-command --config rhg.on-unsupported=abort-silent
@@ -159,10 +158,11 @@
   $ $NO_FALLBACK rhg cat original --exclude="*.rs"
   unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
   
-  USAGE:
-      rhg cat [OPTIONS] <FILE>...
+    If you tried to supply '--exclude' as a value rather than a flag, use '-- --exclude'
   
-  For more information try --help
+  Usage: rhg cat <FILE>...
+  
+  For more information try '--help'
   
   [252]
   $ rhg cat original --exclude="*.rs"
@@ -190,10 +190,11 @@
   Blocking recursive fallback. The 'rhg.fallback-executable = rhg' config points to `rhg` itself.
   unsupported feature: error: Found argument '--exclude' which wasn't expected, or isn't valid in this context
   
-  USAGE:
-      rhg cat [OPTIONS] <FILE>...
+    If you tried to supply '--exclude' as a value rather than a flag, use '-- --exclude'
   
-  For more information try --help
+  Usage: rhg cat <FILE>...
+  
+  For more information try '--help'
   
   [252]
 
--- a/tests/test-rollback.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-rollback.t	Thu Mar 02 22:45:44 2023 +0100
@@ -2,14 +2,9 @@
   $ hg init t
   $ cd t
   $ echo a > a
-  $ hg commit -Am'add a'
-  adding a
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg add a
+  $ hg commit -m 'add a'
+  $ hg verify -q
   $ hg parents
   changeset:   0:1f0dee641bb7
   tag:         tip
@@ -23,12 +18,7 @@
   $ hg rollback
   repository tip rolled back to revision -1 (undo commit)
   working directory now based on revision -1
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 0 changesets with 0 changes to 0 files
+  $ hg verify -q
   $ hg parents
   $ hg status
   A a
@@ -52,21 +42,11 @@
   $ cat .hg/last-message.txt ; echo
   modify a
 
-Test rollback of hg before issue 902 was fixed
 
-  $ hg commit -m "test3"
-  $ hg branch test
-  marked working directory as branch test
-  (branches are permanent and global, did you want a bookmark?)
-  $ rm .hg/undo.branch
-  $ hg rollback
-  repository tip rolled back to revision 0 (undo commit)
-  named branch could not be reset: current branch is still 'test'
-  working directory now based on revision 0
+working dir unaffected by rollback: do not restore dirstate et al.
+  $ hg branch test --quiet
   $ hg branch
   test
-
-working dir unaffected by rollback: do not restore dirstate et. al.
   $ hg log --template '{rev}  {branch}  {desc|firstline}\n'
   0  default  add a again
   $ hg status
@@ -75,21 +55,45 @@
   $ hg commit -m'modify a again'
   $ echo b > b
   $ hg bookmark bar -r default #making bar active, before the transaction
-  $ hg commit -Am'add b'
-  adding b
-  $ hg log --template '{rev}  {branch}  {desc|firstline}\n'
-  2  test  add b
-  1  test  modify a again
-  0  default  add a again
+  $ hg log -G --template '{rev}  [{branch}] ({bookmarks}) {desc|firstline}\n'
+  @  1  [test] (foo) modify a again
+  |
+  o  0  [default] (bar) add a again
+  
+  $ hg add b
+  $ hg commit -m'add b'
+  $ hg log -G --template '{rev}  [{branch}] ({bookmarks}) {desc|firstline}\n'
+  @  2  [test] (foo) add b
+  |
+  o  1  [test] () modify a again
+  |
+  o  0  [default] (bar) add a again
+  
   $ hg update bar
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   (activating bookmark bar)
-  $ cat .hg/undo.branch ; echo
+  $ cat .hg/undo.backup.branch
   test
+  $ hg log -G --template '{rev}  [{branch}] ({bookmarks}) {desc|firstline}\n'
+  o  2  [test] (foo) add b
+  |
+  o  1  [test] () modify a again
+  |
+  @  0  [default] (bar) add a again
+  
+  $ hg rollback
+  abort: rollback of last commit while not checked out may lose data
+  (use -f to force)
+  [255]
   $ hg rollback -f
   repository tip rolled back to revision 1 (undo commit)
   $ hg id -n
   0
+  $ hg log -G --template '{rev}  [{branch}] ({bookmarks}) {desc|firstline}\n'
+  o  1  [test] (foo) modify a again
+  |
+  @  0  [default] (bar) add a again
+  
   $ hg branch
   default
   $ cat .hg/bookmarks.current ; echo
@@ -186,19 +190,14 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg rollback
   rolling back unknown transaction
+  working directory now based on revision 0
   $ cat a
   a
 
 corrupt journal test
   $ echo "foo" > .hg/store/journal
-  $ hg recover --verify
-  rolling back interrupted transaction
+  $ hg recover --verify -q
   couldn't read journal entry 'foo\n'!
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
 
 rollback disabled by config
   $ cat >> $HGRCPATH <<EOF
@@ -433,12 +432,7 @@
   abort: pretxncommit hook exited with status 1
   [40]
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ cd ..
 
@@ -458,11 +452,6 @@
 
   $ hg --config ui.ioerrors=pretxncommit,pretxnclose,txnclose,txnabort,msgabort,msgrollback commit -m 'multiple errors'
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
+  $ hg verify -q
 
   $ cd ..
--- a/tests/test-run-tests.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-run-tests.t	Thu Mar 02 22:45:44 2023 +0100
@@ -2086,4 +2086,4 @@
   $ ./test-py3.py
   3.* (glob)
   $ ./test-py.py
-  3.* (glob) (py3 !)
+  3.* (glob)
--- a/tests/test-share-bookmarks.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-share-bookmarks.t	Thu Mar 02 22:45:44 2023 +0100
@@ -99,8 +99,6 @@
      bm2                       2:c2e0ac586386 (svfs !)
    * bm3                       2:c2e0ac586386
      bmX                       2:c2e0ac586386 (vfs !)
-  transaction abort!
-  rollback completed
   abort: pretxnclose hook exited with status 1
   [40]
   $ hg book bm1
@@ -124,8 +122,6 @@
      bm2                       2:c2e0ac586386 (svfs !)
      bm3                       2:c2e0ac586386
    * bmX                       2:c2e0ac586386
-  transaction abort!
-  rollback completed
   abort: pretxnclose hook exited with status 1
   [40]
   $ hg book bm3
--- a/tests/test-shelve.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-shelve.t	Thu Mar 02 22:45:44 2023 +0100
@@ -1600,6 +1600,7 @@
   $ rm -r .hg/shelve*
 
 #if phasebased
+  $ cp $HGRCPATH $TESTTMP/hgrc-saved
   $ cat <<EOF >> $HGRCPATH
   > [shelve]
   > store = strip
@@ -1628,3 +1629,32 @@
 #if stripbased
   $ hg log --hidden --template '{user}\n'
 #endif
+
+clean up
+
+#if phasebased
+  $ mv $TESTTMP/hgrc-saved $HGRCPATH
+#endif
+
+changed files should be reachable in all shelves
+
+create an extension that emits changed files
+
+  $ cat > shelve-changed-files.py << EOF
+  > """Command to emit changed files for a shelf"""
+  > 
+  > from mercurial import registrar, shelve
+  > 
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > 
+  > 
+  > @command(b'shelve-changed-files')
+  > def shelve_changed_files(ui, repo, name):
+  >     shelf = shelve.ShelfDir(repo).get(name)
+  >     for file in shelf.changed_files(ui, repo):
+  >         ui.write(file + b'\n')
+  > EOF
+
+  $ hg --config extensions.shelve-changed-files=shelve-changed-files.py shelve-changed-files default
+  somefile.py
--- a/tests/test-simple-update.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-simple-update.t	Thu Mar 02 22:45:44 2023 +0100
@@ -5,12 +5,7 @@
   adding foo
   $ hg commit -m "1"
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ hg clone . ../branch
   updating to branch default
@@ -34,12 +29,7 @@
   1 local changesets published
   (run 'hg update' to get a working copy)
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
+  $ hg verify -q
 
   $ hg co
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-sparse-revlog.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-sparse-revlog.t	Thu Mar 02 22:45:44 2023 +0100
@@ -105,11 +105,11 @@
                      delta :        0 (100.00%)
       snapshot  :      383 ( 7.66%)
         lvl-0   :              3 ( 0.06%)
-        lvl-1   :             18 ( 0.36%)
-        lvl-2   :             62 ( 1.24%)
-        lvl-3   :            108 ( 2.16%)
-        lvl-4   :            191 ( 3.82%)
-        lvl-5   :              1 ( 0.02%)
+        lvl-1   :             18 ( 0.36%)  non-ancestor-bases:        9 (50.00%)
+        lvl-2   :             62 ( 1.24%)  non-ancestor-bases:       58 (93.55%)
+        lvl-3   :            108 ( 2.16%)  non-ancestor-bases:      108 (100.00%)
+        lvl-4   :            191 ( 3.82%)  non-ancestor-bases:      180 (94.24%)
+        lvl-5   :              1 ( 0.02%)  non-ancestor-bases:        1 (100.00%)
       deltas    :     4618 (92.34%)
   revision size : 58616973
       snapshot  :  9247844 (15.78%)
@@ -126,6 +126,9 @@
   chunks size   : 58616973
       0x28      : 58616973 (100.00%)
   
+  
+  total-stored-content: 1 732 705 361 bytes
+  
   avg chain length  :        9
   max chain length  :       15
   max chain reach   : 27366701
@@ -144,9 +147,11 @@
   deltas against prev  : 3906 (84.58%)
       where prev = p1  : 3906     (100.00%)
       where prev = p2  :    0     ( 0.00%)
-      other            :    0     ( 0.00%)
+      other-ancestor   :    0     ( 0.00%)
+      unrelated        :    0     ( 0.00%)
   deltas against p1    :  649 (14.05%)
   deltas against p2    :   63 ( 1.36%)
+  deltas against ancs  :    0 ( 0.00%)
   deltas against other :    0 ( 0.00%)
 
 
@@ -159,7 +164,7 @@
      4971    4970      -1       3        5     4930    snap      19179     346472     427596   1.23414  15994877  15567281   36.40652     427596     179288   1.00000        5
   $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971
   DBG-DELTAS-SEARCH: SEARCH rev=4971
-  DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
   DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
   DBG-DELTAS-SEARCH:     type=snapshot-4
   DBG-DELTAS-SEARCH:     size=18296
@@ -167,11 +172,43 @@
   DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
   DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
   DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
-  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4971
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+
+  $ cat << EOF >>.hg/hgrc
+  > [storage]
+  > revlog.optimize-delta-parent-choice = no
+  > revlog.reuse-external-delta = yes
+  > EOF
+
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --quiet
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source full
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
   DBG-DELTAS-SEARCH:     type=snapshot-4
-  DBG-DELTAS-SEARCH:     size=19179
+  DBG-DELTAS-SEARCH:     size=18296
   DBG-DELTAS-SEARCH:     base=4930
-  DBG-DELTAS-SEARCH:     TOO-HIGH
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
   DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
   DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
   DBG-DELTAS-SEARCH:     type=snapshot-3
@@ -189,6 +226,101 @@
   DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
   DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
   DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
-  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source storage
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - cached-delta
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=1 - search-rounds=1 try-count=1 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p1
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
+  DBG-DELTAS-SEARCH:     type=snapshot-4
+  DBG-DELTAS-SEARCH:     size=18296
+  DBG-DELTAS-SEARCH:     base=4930
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p2
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
+  DBG-DELTAS-SEARCH:     type=snapshot-4
+  DBG-DELTAS-SEARCH:     size=18296
+  DBG-DELTAS-SEARCH:     base=4930
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source prev
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
+  DBG-DELTAS-SEARCH:     type=snapshot-4
+  DBG-DELTAS-SEARCH:     size=18296
+  DBG-DELTAS-SEARCH:     base=4930
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
 
   $ cd ..
--- a/tests/test-split.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-split.t	Thu Mar 02 22:45:44 2023 +0100
@@ -156,8 +156,6 @@
   record change 3/3 to 'a'?
   (enter ? for help) [Ynesfdaq?] y
   
-  transaction abort!
-  rollback completed
   abort: edit failed: false exited with status 1
   [250]
   $ hg status
--- a/tests/test-ssh-bundle1.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-ssh-bundle1.t	Thu Mar 02 22:45:44 2023 +0100
@@ -71,12 +71,7 @@
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd local-stream
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 2 changes to 2 files
+  $ hg verify -q
   $ hg branches
   default                        0:1160648e36ce
   $ cd $TESTTMP
@@ -117,12 +112,7 @@
 verify
 
   $ cd local
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 2 changes to 2 files
+  $ hg verify -q
   $ cat >> .hg/hgrc <<EOF
   > [hooks]
   > changegroup = sh -c "printenv.py --line changegroup-in-local 0 ../dummylog"
@@ -214,12 +204,7 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     add
   
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 3 changes to 2 files
+  $ hg verify -q
   $ hg cat -r tip foo
   bleah
   $ echo z > z
@@ -292,10 +277,8 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
+  remote: added 1 changesets with 1 changes to 1 files
   remote: KABOOM
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   $ hg -R ../remote heads
   changeset:   5:1383141674ec
   tag:         tip
@@ -462,10 +445,8 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
+  remote: added 1 changesets with 1 changes to 1 files
   remote: KABOOM
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   local stdout
 
 debug output
--- a/tests/test-ssh-clone-r.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-ssh-clone-r.t	Thu Mar 02 22:45:44 2023 +0100
@@ -20,7 +20,7 @@
   $ for i in 0 1 2 3 4 5 6 7 8; do
   >    hg clone --stream -r "$i" ssh://user@dummy/remote test-"$i"
   >    if cd test-"$i"; then
-  >       hg verify
+  >       hg verify -q
   >       cd ..
   >    fi
   > done
@@ -31,11 +31,6 @@
   new changesets bfaf4b5cbf01
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -43,11 +38,6 @@
   new changesets bfaf4b5cbf01:21f32785131f
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -55,11 +45,6 @@
   new changesets bfaf4b5cbf01:4ce51a113780
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -67,11 +52,6 @@
   new changesets bfaf4b5cbf01:93ee6ab32777
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 4 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -79,11 +59,6 @@
   new changesets bfaf4b5cbf01:c70afb1ee985
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -91,11 +66,6 @@
   new changesets bfaf4b5cbf01:f03ae5a9b979
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 1 files
   adding changesets
   adding manifests
   adding file changes
@@ -103,11 +73,6 @@
   new changesets bfaf4b5cbf01:095cb14b1b4d
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 5 changes to 2 files
   adding changesets
   adding manifests
   adding file changes
@@ -115,11 +80,6 @@
   new changesets bfaf4b5cbf01:faa2e4234c7a
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 6 changes to 3 files
   adding changesets
   adding manifests
   adding file changes
@@ -127,11 +87,6 @@
   new changesets bfaf4b5cbf01:916f1afdef90
   updating to branch default
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
   $ cd test-8
   $ hg pull ../test-7
   pulling from ../test-7
@@ -142,12 +97,7 @@
   added 4 changesets with 2 changes to 3 files (+1 heads)
   new changesets c70afb1ee985:faa2e4234c7a
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
   $ cd ..
   $ cd test-1
   $ hg pull -r 4 ssh://user@dummy/remote
@@ -159,12 +109,7 @@
   added 1 changesets with 0 changes to 0 files (+1 heads)
   new changesets c70afb1ee985
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 2 changes to 1 files
+  $ hg verify -q
   $ hg pull ssh://user@dummy/remote
   pulling from ssh://user@dummy/remote
   searching for changes
@@ -185,12 +130,7 @@
   added 2 changesets with 0 changes to 0 files (+1 heads)
   new changesets c70afb1ee985:f03ae5a9b979
   (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 3 changes to 1 files
+  $ hg verify -q
   $ hg pull ssh://user@dummy/remote
   pulling from ssh://user@dummy/remote
   searching for changes
@@ -200,11 +140,6 @@
   added 4 changesets with 4 changes to 4 files
   new changesets 93ee6ab32777:916f1afdef90
   (run 'hg update' to get a working copy)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 9 changesets with 7 changes to 4 files
+  $ hg verify -q
 
   $ cd ..
--- a/tests/test-ssh.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-ssh.t	Thu Mar 02 22:45:44 2023 +0100
@@ -61,12 +61,7 @@
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd local-stream
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 2 changes to 2 files
+  $ hg verify -q
   $ hg branches
   default                        0:1160648e36ce
   $ cd $TESTTMP
@@ -103,12 +98,7 @@
 verify
 
   $ cd local
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 2 changes to 2 files
+  $ hg verify -q
   $ cat >> .hg/hgrc <<EOF
   > [hooks]
   > changegroup = sh -c "printenv.py changegroup-in-local 0 ../dummylog"
@@ -200,12 +190,7 @@
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     add
   
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 3 changes to 2 files
+  $ hg verify -q
   $ hg cat -r tip foo
   bleah
   $ echo z > z
@@ -289,11 +274,9 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
+  remote: added 1 changesets with 1 changes to 1 files
   remote: KABOOM
   remote: KABOOM IN PROCESS
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   $ hg -R ../remote heads
   changeset:   5:1383141674ec
   tag:         tip
@@ -323,7 +306,7 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files (py3 !)
+  remote: added 1 changesets with 1 changes to 1 files
   remote: KABOOM
   remote: KABOOM IN PROCESS
 
@@ -514,11 +497,9 @@
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
+  remote: added 1 changesets with 1 changes to 1 files
   remote: KABOOM
   remote: KABOOM IN PROCESS
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   local stdout
 
 debug output
--- a/tests/test-static-http.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-static-http.t	Thu Mar 02 22:45:44 2023 +0100
@@ -38,12 +38,7 @@
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd local
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 2 changes to 2 files
+  $ hg verify -q
   $ cat bar
   foo
   $ cd ../remote
@@ -134,13 +129,7 @@
   new changesets be090ea66256:322ea90975df
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd local2
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 3 changes to 3 files
-  checking subrepo links
+  $ hg verify -q
   $ cat a
   a
   $ hg paths
@@ -155,12 +144,7 @@
   updating to branch default
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ cd local3
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 0 changesets with 0 changes to 0 files
+  $ hg verify -q
   $ hg paths
   default = static-http://localhost:$HGPORT/remotempty
 
--- a/tests/test-strip-cross.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-strip-cross.t	Thu Mar 02 22:45:44 2023 +0100
@@ -80,35 +80,20 @@
   >     echo "% Trying to strip revision $i"
   >     hg --cwd $i strip $i
   >     echo "% Verifying"
-  >     hg --cwd $i verify
+  >     hg --cwd $i verify -q
   >     echo
   > done
   % Trying to strip revision 0
   saved backup bundle to $TESTTMP/files/0/.hg/strip-backup/cbb8c2f0a2e3-239800b9-backup.hg
   % Verifying
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 12 changes to 6 files
   
   % Trying to strip revision 1
   saved backup bundle to $TESTTMP/files/1/.hg/strip-backup/124ecc0cbec9-6104543f-backup.hg
   % Verifying
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 12 changes to 6 files
   
   % Trying to strip revision 2
   saved backup bundle to $TESTTMP/files/2/.hg/strip-backup/f6439b304a1a-c6505a5f-backup.hg
   % Verifying
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 12 changes to 6 files
   
   $ cd ..
 
@@ -139,26 +124,16 @@
   >     echo "% Trying to strip revision $i"
   >     hg --cwd $i strip $i
   >     echo "% Verifying"
-  >     hg --cwd $i verify
+  >     hg --cwd $i verify -q
   >     echo
   > done
   % Trying to strip revision 2
   saved backup bundle to $TESTTMP/manifests/2/.hg/strip-backup/f3015ad03c03-4d98bdc2-backup.hg
   % Verifying
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 2 files
   
   % Trying to strip revision 3
   saved backup bundle to $TESTTMP/manifests/3/.hg/strip-backup/9632aa303aa4-69192e3f-backup.hg
   % Verifying
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 2 files
   
   $ cd ..
 
@@ -194,27 +169,16 @@
   >     echo "% Trying to strip revision $i"
   >     hg --cwd $i strip $i
   >     echo "% Verifying"
-  >     hg --cwd $i verify
+  >     hg --cwd $i verify -q
   >     echo
   > done
   % Trying to strip revision 2
   saved backup bundle to $TESTTMP/treemanifests/2/.hg/strip-backup/145f5c75f9ac-a105cfbe-backup.hg
   % Verifying
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 4 changes to 3 files
   
   % Trying to strip revision 3
   saved backup bundle to $TESTTMP/treemanifests/3/.hg/strip-backup/e4e3de5c3cb2-f4c70376-backup.hg
   % Verifying
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 4 changes to 3 files
   
+
   $ cd ..
--- a/tests/test-subrepo-missing.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-subrepo-missing.t	Thu Mar 02 22:45:44 2023 +0100
@@ -111,13 +111,7 @@
 
   $ hg ci -m "amended subrepo (again)"
   $ hg --config extensions.strip= --hidden strip -R subrepo -qr 'tip' --config devel.strip-obsmarkers=no
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
-  checking subrepo links
+  $ hg verify -q
   subrepo 'subrepo' is hidden in revision a66de08943b6
   subrepo 'subrepo' is hidden in revision 674d05939c1e
   subrepo 'subrepo' not found in revision a7d05d9055a4
@@ -125,13 +119,7 @@
 verifying shouldn't init a new subrepo if the reference doesn't exist
 
   $ mv subrepo b
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 5 changesets with 5 changes to 2 files
-  checking subrepo links
+  $ hg verify -q
   0: repository $TESTTMP/repo/subrepo not found
   1: repository $TESTTMP/repo/subrepo not found
   3: repository $TESTTMP/repo/subrepo not found
--- a/tests/test-transaction-wc-rollback-race.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-transaction-wc-rollback-race.t	Thu Mar 02 22:45:44 2023 +0100
@@ -134,8 +134,6 @@
   $ hg phase --rev 0
   0: draft
   $ cat ../log.err
-  transaction abort!
-  rollback completed
   abort: pretxnclose.test hook exited with status 1
 
 Actual testing
@@ -153,7 +151,7 @@
   $ touch $TESTTMP/transaction-continue
   $ wait
   $ hg status
-  R default_a (missing-correct-output !)
+  R default_a
   $ hg revert --all --quiet
 
 Changing branch from default
@@ -204,10 +202,8 @@
   $ touch $TESTTMP/transaction-continue
   $ wait
   $ hg log --rev . -T '{desc}\n'
-  babar_l (missing-correct-output !)
-  babar_m (known-bad-output !)
+  babar_l
   $ hg st
-  ! babar_m (known-bad-output !)
 
   $ hg purge --no-confirm
   $ hg up --quiet babar
--- a/tests/test-treemanifest.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-treemanifest.t	Thu Mar 02 22:45:44 2023 +0100
@@ -399,13 +399,7 @@
   added 11 changesets with 15 changes to 10 files (+3 heads)
   $ hg debugrequires -R clone | grep treemanifest
   treemanifest
-  $ hg -R clone verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 11 changesets with 15 changes to 10 files
+  $ hg -R clone verify -q
 
 Create deeper repo with tree manifests.
 
@@ -567,13 +561,7 @@
   $ hg ci -m troz
 
 Verify works
-  $ hg verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg verify -q
 
 #if repofncache
 Dirlogs are included in fncache
@@ -631,6 +619,7 @@
    b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
    b/foo/apple/bees/flower.py@0: in changeset but not in manifest
   checking files
+  not checking dirstate because of previous errors
   checked 4 changesets with 18 changes to 8 files
   6 warnings encountered! (reporevlogstore !)
   9 integrity errors encountered!
@@ -656,6 +645,7 @@
    (expected None)
   crosschecking files in changesets and manifests
   checking files
+  not checking dirstate because of previous errors
   checked 4 changesets with 18 changes to 8 files
   2 warnings encountered!
   8 integrity errors encountered!
@@ -707,13 +697,7 @@
   deepclone/.hg/store/meta/~2e_a/00manifest.i (reporevlogstore !)
 Verify passes.
   $ cd deepclone
-  $ hg verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg verify -q
   $ cd ..
 
 #if reporevlogstore
@@ -755,33 +739,15 @@
 
 Local clone with basicstore
   $ hg clone -U deeprepo-basicstore local-clone-basicstore
-  $ hg -R local-clone-basicstore verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg -R local-clone-basicstore verify -q
 
 Local clone with encodedstore
   $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
-  $ hg -R local-clone-encodedstore verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg -R local-clone-encodedstore verify -q
 
 Local clone with fncachestore
   $ hg clone -U deeprepo local-clone-fncachestore
-  $ hg -R local-clone-fncachestore verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg -R local-clone-fncachestore verify -q
 
 Stream clone with basicstore
   $ hg clone --config experimental.changegroup3=True --stream -U \
@@ -789,13 +755,7 @@
   streaming all changes
   28 files to transfer, * of data (glob)
   transferred * in * seconds (*) (glob)
-  $ hg -R stream-clone-basicstore verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg -R stream-clone-basicstore verify -q
 
 Stream clone with encodedstore
   $ hg clone --config experimental.changegroup3=True --stream -U \
@@ -803,13 +763,7 @@
   streaming all changes
   28 files to transfer, * of data (glob)
   transferred * in * seconds (*) (glob)
-  $ hg -R stream-clone-encodedstore verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg -R stream-clone-encodedstore verify -q
 
 Stream clone with fncachestore
   $ hg clone --config experimental.changegroup3=True --stream -U \
@@ -817,13 +771,7 @@
   streaming all changes
   22 files to transfer, * of data (glob)
   transferred * in * seconds (*) (glob)
-  $ hg -R stream-clone-fncachestore verify
-  checking changesets
-  checking manifests
-  checking directory manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 4 changesets with 18 changes to 8 files
+  $ hg -R stream-clone-fncachestore verify -q
 
 Packed bundle
   $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
--- a/tests/test-unamend.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-unamend.t	Thu Mar 02 22:45:44 2023 +0100
@@ -363,13 +363,7 @@
   $ hg mv c wat
   $ hg unamend
 
-  $ hg verify -v
-  repository uses revlog format 1
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 28 changesets with 16 changes to 11 files
+  $ hg verify -q
 
 Retained copies in new predecessor commit
 
--- a/tests/test-unionrepo.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-unionrepo.t	Thu Mar 02 22:45:44 2023 +0100
@@ -133,12 +133,7 @@
   $ hg -R repo3 paths
   default = union:repo1+repo2
 
-  $ hg -R repo3 verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 6 changesets with 11 changes to 6 files
+  $ hg -R repo3 verify -q
 
   $ hg -R repo3 heads --template '{rev}:{node|short}  {desc|firstline}\n'
   5:2f0d178c469c  repo2-3
--- a/tests/test-upgrade-repo.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-upgrade-repo.t	Thu Mar 02 22:45:44 2023 +0100
@@ -844,7 +844,6 @@
   requires
   undo
   undo.backupfiles
-  undo.phaseroots
 
 manifest should be generaldelta
 
@@ -853,12 +852,7 @@
 
 verify should be happy
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
 old store should be backed up
 
@@ -874,7 +868,6 @@
   undo
   undo.backup.fncache
   undo.backupfiles
-  undo.phaseroots
 
 unless --no-backup is passed
 
@@ -972,7 +965,7 @@
 Check that the repo still works fine
 
   $ hg log -G --stat
-  @  changeset:   2:fca376863211 (py3 !)
+  @  changeset:   2:fca376863211
   |  tag:         tip
   |  parent:      0:ba592bf28da2
   |  user:        test
@@ -995,12 +988,7 @@
   
   
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
 Check we can select negatively
 
@@ -1047,12 +1035,7 @@
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
 Check that we can select changelog only
 
@@ -1098,12 +1081,7 @@
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
 Check that we can select filelog only
 
@@ -1149,12 +1127,7 @@
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
 
 Check you can't skip revlog clone during important format downgrade
@@ -1224,12 +1197,7 @@
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
 Check you can't skip revlog clone during important format upgrade
 
@@ -1285,12 +1253,7 @@
   store replacement complete; repository was inconsistent for *s (glob)
   finalizing requirements file and making repository readable again
   removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 3 changesets with 3 changes to 3 files
+  $ hg verify -q
 
   $ cd ..
 
@@ -1413,12 +1376,7 @@
   lfs
   $ find .hg/store/lfs -type f
   .hg/store/lfs/objects/d0/beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 2 changesets with 2 changes to 2 files
+  $ hg verify -q
   $ hg debugdata lfs.bin 0
   version https://git-lfs.github.com/spec/v1
   oid sha256:d0beab232adff5ba365880366ad30b1edb85c4c5372442b5d2fe27adc96d653f
--- a/tests/test-url-download.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-url-download.t	Thu Mar 02 22:45:44 2023 +0100
@@ -52,7 +52,7 @@
   $ hg -R server debuglfput null.txt
   a57b57b39ee4dc3da1e03526596007f480ecdbe8
 
-  $ hg --traceback debugdownload "largefile://a57b57b39ee4dc3da1e03526596007f480ecdbe8" --config paths.default=http://localhost:$HGPORT/
+  $ hg debugdownload "largefile://a57b57b39ee4dc3da1e03526596007f480ecdbe8" --config paths.default=http://localhost:$HGPORT/
   1 0000000000000000000000000000000000000000
 
 from within a repository
--- a/tests/test-util.py	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-util.py	Thu Mar 02 22:45:44 2023 +0100
@@ -50,7 +50,7 @@
 
 # attr.s default factory for util.timedcmstats.start binds the timer we
 # need to mock out.
-_start_default = (util.timedcmstats.start.default, 'factory')
+_start_default = (util.timedcmstats.__attrs_attrs__.start.default, 'factory')
 
 
 @contextlib.contextmanager
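
For context on the test-util.py hunk above: newer attrs releases no longer leave the
`Attribute` objects reachable as plain class attributes, so `util.timedcmstats.start` stops
working and the metadata has to be read from the `__attrs_attrs__` tuple (or the equivalent
`attr.fields()` accessor) instead. A small self-contained sketch of that lookup, using a
throwaway `Example` class rather than Mercurial's `timedcmstats`:

    import attr

    @attr.s
    class Example:
        start = attr.ib(default=attr.Factory(list))

    # __attrs_attrs__ is a tuple of Attribute objects that also supports
    # access by field name; .default is the Factory wrapper and .factory
    # is the callable that produces the default value.
    fields = Example.__attrs_attrs__
    assert fields.start.default.factory is list

    # attr.fields() is the documented way to reach the same metadata.
    assert attr.fields(Example).start is fields.start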
--- a/tests/test-verify.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-verify.t	Thu Mar 02 22:45:44 2023 +0100
@@ -20,6 +20,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 1 changesets with 3 changes to 3 files
 
 verify with journal
@@ -31,6 +32,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 1 changesets with 3 changes to 3 files
   $ rm .hg/store/journal
 
@@ -55,6 +57,7 @@
    warning: revlog 'data/bar.txt.i' not in fncache!
    0: empty or missing bar.txt
    bar.txt@0: manifest refers to unknown revision 256559129457
+  not checking dirstate because of previous errors
   checked 1 changesets with 0 changes to 3 files
   3 warnings encountered!
   hint: run "hg debugrebuildfncache" to recover from corrupt fncache
@@ -83,6 +86,7 @@
    0: empty or missing changelog
    manifest@0: d0b6632564d4 not in changesets
    manifest@1: 941fc4534185 not in changesets
+  not checking dirstate because of previous errors
   3 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -93,6 +97,7 @@
   $ rm .hg/store/00manifest.*
   $ hg verify -q
    0: empty or missing manifest
+  not checking dirstate because of previous errors
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -106,6 +111,7 @@
    0: empty or missing file
    file@0: manifest refers to unknown revision 362fef284ce2
    file@1: manifest refers to unknown revision c10f2164107d
+  not checking dirstate because of previous errors
   1 warnings encountered!
   hint: run "hg debugrebuildfncache" to recover from corrupt fncache
   3 integrity errors encountered!
@@ -119,7 +125,13 @@
   $ rm .hg/store/00manifest.*
   $ hg verify -q
   warning: orphan data file 'data/file.i'
+  warning: ignoring unknown working parent c5ddb05ab828!
+  file marked as tracked in p1 (000000000000) but not in manifest1
   1 warnings encountered!
+  1 integrity errors encountered!
+  dirstate inconsistent with current parent's manifest
+  1 dirstate errors
+  [1]
   $ cp -R .hg/store-full/. .hg/store
 
 Entire changelog and filelog missing
@@ -134,6 +146,7 @@
    ?: empty or missing file
    file@0: manifest refers to unknown revision 362fef284ce2
    file@1: manifest refers to unknown revision c10f2164107d
+  not checking dirstate because of previous errors
   1 warnings encountered!
   hint: run "hg debugrebuildfncache" to recover from corrupt fncache
   6 integrity errors encountered!
@@ -149,6 +162,7 @@
    0: empty or missing manifest
    warning: revlog 'data/file.i' not in fncache!
    0: empty or missing file
+  not checking dirstate because of previous errors
   1 warnings encountered!
   hint: run "hg debugrebuildfncache" to recover from corrupt fncache
   2 integrity errors encountered!
@@ -164,6 +178,7 @@
    manifest@?: 941fc4534185 not in changesets
    file@?: rev 1 points to nonexistent changeset 1
    (expected 0)
+  not checking dirstate because of previous errors
   1 warnings encountered!
   3 integrity errors encountered!
   [1]
@@ -175,6 +190,7 @@
   $ hg verify -q
    manifest@1: changeset refers to unknown revision 941fc4534185
    file@1: c10f2164107d not in manifests
+  not checking dirstate because of previous errors
   2 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
@@ -185,6 +201,7 @@
   $ cp -f .hg/store-partial/data/file.* .hg/store/data
   $ hg verify -q
    file@1: manifest refers to unknown revision c10f2164107d
+  not checking dirstate because of previous errors
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
@@ -198,6 +215,7 @@
    file@?: rev 1 points to nonexistent changeset 1
    (expected 0)
    file@?: c10f2164107d not in manifests
+  not checking dirstate because of previous errors
   1 warnings encountered!
   2 integrity errors encountered!
   [1]
@@ -211,6 +229,7 @@
    manifest@?: rev 1 points to nonexistent changeset 1
    manifest@?: 941fc4534185 not in changesets
    file@?: manifest refers to unknown revision c10f2164107d
+  not checking dirstate because of previous errors
   3 integrity errors encountered!
   [1]
   $ cp -R .hg/store-full/. .hg/store
@@ -221,6 +240,7 @@
   $ cp -f .hg/store-partial/data/file.* .hg/store/data
   $ hg verify -q
    manifest@1: changeset refers to unknown revision 941fc4534185
+  not checking dirstate because of previous errors
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
   [1]
@@ -236,6 +256,7 @@
    manifest@?: d0b6632564d4 not in changesets
    file@?: rev 0 points to unexpected changeset 0
    (expected 1)
+  not checking dirstate because of previous errors
   1 warnings encountered!
   4 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -249,6 +270,7 @@
   $ hg verify -q
    manifest@0: reading delta d0b6632564d4: * (glob)
    file@0: 362fef284ce2 not in manifests
+  not checking dirstate because of previous errors
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -260,6 +282,7 @@
   >   2> /dev/null
   $ hg verify -q
    file@0: unpacking 362fef284ce2: * (glob)
+  not checking dirstate because of previous errors
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
@@ -275,12 +298,7 @@
   marked working directory as branch foo
   (branches are permanent and global, did you want a bookmark?)
   $ hg ci -m branchfoo
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 0 changes to 0 files
+  $ hg verify -q
 
 test revlog corruption
 
@@ -292,14 +310,10 @@
   $ dd if=.hg/store/data/a.i of=start bs=1 count=20 2>/dev/null
   $ cat start b > .hg/store/data/a.i
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-   a@1: broken revlog! (index data/a is corrupted)
+  $ hg verify -q
+   a@1: broken revlog! (index a is corrupted)
   warning: orphan data file 'data/a.i'
-  checked 2 changesets with 0 changes to 1 files
+  not checking dirstate because of previous errors
   1 warnings encountered!
   1 integrity errors encountered!
   (first damaged changeset appears to be 1)
@@ -317,6 +331,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
+  checking dirstate
   checked 1 changesets with 1 changes to 1 files
   $ cd ..
 
@@ -330,12 +345,7 @@
   > EOF
   $ echo '[BASE64]content' > base64
   $ hg commit -Aqm 'flag processor content' base64
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
 
   $ cat >> $TESTTMP/break-base64.py <<EOF
   > import base64
@@ -345,20 +355,11 @@
   > breakbase64=$TESTTMP/break-base64.py
   > EOF
 
-  $ hg verify
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-   base64@0: unpacking 794cee7777cb: integrity check failed on data/base64:0
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify -q
+   base64@0: unpacking 794cee7777cb: integrity check failed on base64:0
+  not checking dirstate because of previous errors
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)
   [1]
-  $ hg verify --config verify.skipflags=2147483647
-  checking changesets
-  checking manifests
-  crosschecking files in changesets and manifests
-  checking files
-  checked 1 changesets with 1 changes to 1 files
+  $ hg verify --config verify.skipflags=2147483647 -q
 
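The new expected lines in test-verify.t above come from verify's added dirstate phase:
after the revlog checks pass, `hg verify` now crosschecks the dirstate against the working
parent's manifest ("checking dirstate"), and skips that phase when earlier integrity errors
would make the comparison meaningless ("not checking dirstate because of previous errors").
Below is a minimal sketch of such a crosscheck, with hypothetical set-valued inputs standing
in for the real repo objects; it is not Mercurial's implementation.

    def crosscheck_dirstate(tracked_in_p1, p1_manifest):
        """Report files whose dirstate tracking disagrees with manifest1."""
        errors = []
        for f in sorted(tracked_in_p1 - p1_manifest):
            errors.append(
                'file %s marked as tracked in p1 but not in manifest1' % f
            )
        for f in sorted(p1_manifest - tracked_in_p1):
            errors.append(
                'file %s in manifest1 but not marked as tracked in p1' % f
            )
        return errors

    # The corrupted repo in the test hits the first case: the dirstate
    # still tracks "file" while the removed manifest no longer lists it.
    print(crosscheck_dirstate({'file'}, set()))
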
--- a/tests/test-worker.t	Thu Mar 02 15:21:36 2023 +0100
+++ b/tests/test-worker.t	Thu Mar 02 22:45:44 2023 +0100
@@ -86,9 +86,9 @@
   $ hg --config "extensions.t=$abspath" --config 'worker.numcpus=8' \
   > test 100000.0 abort --traceback 2>&1 | egrep '(WorkerError|Abort)'
       raise error.Abort(b'known exception')
-  mercurial.error.Abort: known exception (py3 !)
+  mercurial.error.Abort: known exception
       raise error.WorkerError(status)
-  mercurial.error.WorkerError: 255 (py3 !)
+  mercurial.error.WorkerError: 255
 
 Traceback must be printed for unknown exceptions