changeset:   51125:4224b1aa7ad8
branch:      stable
user:        Raphaël Gomès <rgomes@octobus.net>
date:        Tue, 07 Nov 2023 15:21:11 +0100
parent:      51117:f6bb9d1c230c
parent:      51124:80bda4254b84
child:       51126:27055614b685
summary:     branching: merge default into stable for 6.6rc0
--- a/contrib/benchmarks/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/benchmarks/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -40,7 +40,6 @@
     extensions,
     hg,
     ui as uimod,
-    util,
 )
 
 basedir = os.path.abspath(
@@ -66,7 +65,7 @@
     os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
     # for "historical portability"
     # ui.load() has been available since d83ca85
-    if util.safehasattr(uimod.ui, "load"):
+    if hasattr(uimod.ui, "load"):
         ui = uimod.ui.load()
     else:
         ui = uimod.ui()
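
The hunk above swaps Mercurial's util.safehasattr() for the built-in hasattr(), which is why the now-unused util import is dropped; on Python 3 the two are equivalent, since hasattr() no longer swallows arbitrary exceptions the way it did on Python 2. A minimal sketch of the "historical portability" idiom the benchmark relies on, assuming only the uimod module from the hunk (load_ui is an illustrative name):

    def load_ui(uimod):
        # Probe for the API the installed Mercurial actually provides
        # instead of comparing version numbers.
        if hasattr(uimod.ui, "load"):
            # ui.load() also reads the user's configuration files.
            return uimod.ui.load()
        # Older releases only offer the bare constructor.
        return uimod.ui()
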
--- a/contrib/byteify-strings.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/byteify-strings.py	Tue Nov 07 15:21:11 2023 +0100
@@ -212,18 +212,14 @@
             fn = t.string
 
             # *attr() builtins don't accept byte strings as their 2nd argument.
-            if (
-                fn
-                in (
-                    'getattr',
-                    'setattr',
-                    'hasattr',
-                    'safehasattr',
-                    'wrapfunction',
-                    'wrapclass',
-                    'addattr',
-                )
-                and (opts['allow-attr-methods'] or not _isop(i - 1, '.'))
+            if fn in (
+                'getattr',
+                'setattr',
+                'hasattr',
+                'safehasattr',
+                'wrapfunction',
+                'wrapclass',
+                'addattr',
             ):
                 arg1idx = _findargnofcall(1)
                 if arg1idx is not None:
@@ -312,12 +308,6 @@
         help='rewrite iteritems() and itervalues()',
     ),
     ap.add_argument(
-        '--allow-attr-methods',
-        action='store_true',
-        default=False,
-        help='also handle attr*() when they are methods',
-    ),
-    ap.add_argument(
         '--treat-as-kwargs',
         nargs="+",
         default=[],
@@ -328,7 +318,6 @@
     opts = {
         'dictiter': args.dictiter,
         'treat-as-kwargs': set(args.treat_as_kwargs),
-        'allow-attr-methods': args.allow_attr_methods,
     }
     for fname in args.files:
         fname = os.path.realpath(fname)
--- a/contrib/check-code.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/check-code.py	Tue Nov 07 15:21:11 2023 +0100
@@ -383,12 +383,6 @@
             "use True/False for constant Boolean expression",
         ),
         (r'^\s*if False(:| +and)', 'Remove code instead of using `if False`'),
-        (
-            r'(?:(?<!def)\s+|\()hasattr\(',
-            'hasattr(foo, bar) is broken on py2, use util.safehasattr(foo, bar) '
-            'instead',
-            r'#.*hasattr-py3-only',
-        ),
         (r'opener\([^)]*\).read\(', "use opener.read() instead"),
         (r'opener\([^)]*\).write\(', "use opener.write() instead"),
         (r'(?i)descend[e]nt', "the proper spelling is descendAnt"),
--- a/contrib/check-pytype.sh	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/check-pytype.sh	Tue Nov 07 15:21:11 2023 +0100
@@ -26,7 +26,6 @@
 # hgext/githelp.py              # [attribute-error] [wrong-arg-types]
 # hgext/hgk.py                  # [attribute-error]
 # hgext/histedit.py             # [attribute-error], [wrong-arg-types]
-# hgext/infinitepush            # using bytes for str literal; scheduled for removal
 # hgext/keyword.py              # [attribute-error]
 # hgext/largefiles/storefactory.py  # [attribute-error]
 # hgext/lfs/__init__.py         # [attribute-error]
@@ -88,7 +87,6 @@
     -x hgext/githelp.py \
     -x hgext/hgk.py \
     -x hgext/histedit.py \
-    -x hgext/infinitepush \
     -x hgext/keyword.py \
     -x hgext/largefiles/storefactory.py \
     -x hgext/lfs/__init__.py \
--- a/contrib/import-checker.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/import-checker.py	Tue Nov 07 15:21:11 2023 +0100
@@ -45,6 +45,7 @@
     'mercurial.thirdparty',
     'mercurial.thirdparty.attr',
     'mercurial.thirdparty.jaraco.collections',
+    'mercurial.thirdparty.tomli',
     'mercurial.thirdparty.zope',
     'mercurial.thirdparty.zope.interface',
     'typing',
--- a/contrib/merge-lists/Cargo.lock	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/merge-lists/Cargo.lock	Tue Nov 07 15:21:11 2023 +0100
@@ -12,6 +12,55 @@
 ]
 
 [[package]]
+name = "anstream"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is-terminal",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
+dependencies = [
+ "anstyle",
+ "windows-sys",
+]
+
+[[package]]
 name = "assert_cmd"
 version = "2.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -26,17 +75,6 @@
 ]
 
 [[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi",
- "libc",
- "winapi",
-]
-
-[[package]]
 name = "autocfg"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -44,9 +82,9 @@
 
 [[package]]
 name = "bitflags"
-version = "1.3.2"
+version = "2.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"
 
 [[package]]
 name = "bstr"
@@ -60,36 +98,59 @@
 ]
 
 [[package]]
+name = "cc"
+version = "1.0.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
+
+[[package]]
 name = "clap"
-version = "3.1.6"
+version = "4.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8c93436c21e4698bacadf42917db28b23017027a4deccb35dbe47a7e7840123"
+checksum = "5b0827b011f6f8ab38590295339817b0d26f344aa4932c3ced71b45b0c54b4a9"
 dependencies = [
- "atty",
- "bitflags",
+ "clap_builder",
  "clap_derive",
- "indexmap",
- "lazy_static",
- "os_str_bytes",
+ "once_cell",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9441b403be87be858db6a23edb493e7f694761acdc3343d5a0fcaafd304cbc9e"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
  "strsim",
- "termcolor",
- "textwrap",
 ]
 
 [[package]]
 name = "clap_derive"
-version = "3.1.4"
+version = "4.3.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da95d038ede1a964ce99f49cbe27a7fb538d1da595e4b4f70b8c8f338d17bf16"
+checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
 dependencies = [
  "heck",
- "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.27",
 ]
 
 [[package]]
+name = "clap_lex"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
+
+[[package]]
+name = "colorchoice"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
+
+[[package]]
 name = "console"
 version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -127,6 +188,27 @@
 checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
 
 [[package]]
+name = "errno"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "windows-sys",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
 name = "fuchsia-cprng"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -146,12 +228,9 @@
 
 [[package]]
 name = "hermit-abi"
-version = "0.1.19"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
-dependencies = [
- "libc",
-]
+checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
 
 [[package]]
 name = "indexmap"
@@ -178,6 +257,17 @@
 ]
 
 [[package]]
+name = "is-terminal"
+version = "0.4.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
+dependencies = [
+ "hermit-abi",
+ "rustix",
+ "windows-sys",
+]
+
+[[package]]
 name = "itertools"
 version = "0.10.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -200,9 +290,9 @@
 
 [[package]]
 name = "libc"
-version = "0.2.119"
+version = "0.2.147"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4"
+checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
 
 [[package]]
 name = "linked-hash-map"
@@ -211,6 +301,12 @@
 checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3"
 
 [[package]]
+name = "linux-raw-sys"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0"
+
+[[package]]
 name = "memchr"
 version = "2.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -231,18 +327,9 @@
 
 [[package]]
 name = "once_cell"
-version = "1.10.0"
+version = "1.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9"
-
-[[package]]
-name = "os_str_bytes"
-version = "6.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64"
-dependencies = [
- "memchr",
-]
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
 
 [[package]]
 name = "predicates"
@@ -272,43 +359,19 @@
 ]
 
 [[package]]
-name = "proc-macro-error"
-version = "1.0.4"
+name = "proc-macro2"
+version = "1.0.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
 dependencies = [
- "proc-macro-error-attr",
- "proc-macro2",
- "quote",
- "syn",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro-error-attr"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
-dependencies = [
- "proc-macro2",
- "quote",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.36"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
-dependencies = [
- "unicode-xid",
+ "unicode-ident",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.15"
+version = "1.0.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145"
+checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0"
 dependencies = [
  "proc-macro2",
 ]
@@ -383,6 +446,19 @@
 ]
 
 [[package]]
+name = "rustix"
+version = "0.38.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys",
+]
+
+[[package]]
 name = "ryu"
 version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -405,7 +481,7 @@
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 1.0.87",
 ]
 
 [[package]]
@@ -458,6 +534,17 @@
 ]
 
 [[package]]
+name = "syn"
+version = "2.0.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b60f673f44a8255b9c8c657daf66a596d435f2da81a555b06dc644d080ba45e0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
 name = "tempdir"
 version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -468,15 +555,6 @@
 ]
 
 [[package]]
-name = "termcolor"
-version = "1.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
-dependencies = [
- "winapi-util",
-]
-
-[[package]]
 name = "terminal_size"
 version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -493,10 +571,10 @@
 checksum = "507e9898683b6c43a9aa55b64259b721b52ba226e0f3779137e50ad114a4c90b"
 
 [[package]]
-name = "textwrap"
-version = "0.15.0"
+name = "unicode-ident"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
 
 [[package]]
 name = "unicode-xid"
@@ -505,10 +583,10 @@
 checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
 
 [[package]]
-name = "version_check"
-version = "0.9.4"
+name = "utf8parse"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
 
 [[package]]
 name = "wait-timeout"
@@ -536,21 +614,78 @@
 checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
 
 [[package]]
-name = "winapi-util"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
-dependencies = [
- "winapi",
-]
-
-[[package]]
 name = "winapi-x86_64-pc-windows-gnu"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
+name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
+
+[[package]]
 name = "yaml-rust"
 version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
--- a/contrib/merge-lists/Cargo.toml	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/merge-lists/Cargo.toml	Tue Nov 07 15:21:11 2023 +0100
@@ -10,7 +10,7 @@
 rust-version = "1.59"
 
 [dependencies]
-clap = { version = "3.1.6", features = ["derive"] }
+clap = { version = "4.3.17", features = ["derive"] }
 itertools = "0.10.3"
 regex = "1.5.5"
 similar = { version="2.1.0", features = ["bytes"] }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/openvms/build.com	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,422 @@
+$!
+$! Build Python C extension
+$!
+$ cc/name=(short,as_is)-
+	/incl=("/python$root/include", "../../mercurial") -
+	[--.mercurial.cext]base85.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial]bdiff.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.cext]bdiff.c -
+	/obj=[]bdiff-mod.obj
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.thirdparty.xdiff]xdiffi.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.thirdparty.xdiff]xprepare.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+	[--.mercurial.thirdparty.xdiff]xutils.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.cext]mpatch.c/obj=mpatch-mod.obj
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial]mpatch.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+	/warn=disa=QUESTCOMPARE -
+        [--.mercurial.cext]dirs.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.cext]charencode.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.cext]revlog.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.cext]manifest.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+        [--.mercurial.cext]pathencode.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+	/warn=disa=CVTDIFTYPES -
+        [--.mercurial.cext]osutil.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial") -
+	/warn=disa=EXTRASEMI -
+        [--.mercurial.cext]parsers.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+		"../python-zstandard/c-ext", "../python-zstandard/zstd", -
+		"../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard]zstd.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+		"../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]frameparams.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]compressobj.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+		"../python-zstandard/zstd/common") -
+        [-.python-zstandard.c-ext]compressor.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]bufferutil.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]decompressoriterator.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+		"../python-zstandard/zstd/common") -
+        [-.python-zstandard.c-ext]decompressor.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+		"../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]frameparams.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+		"../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]constants.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]decompressionreader.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]decompressionwriter.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+		"../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]compressiondict.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+		"../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]decompressobj.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]compressionwriter.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]compressionreader.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]compressoriterator.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]compressionparams.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.c-ext]compressionchunker.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+		"../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.zstd.common]zstd_common.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder") -
+        [-.python-zstandard.zstd.common]error_private.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+		"../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_compress.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+		"../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_ldm.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_opt.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_lazy.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.compress]huf_compress.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.common]entropy_common.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.compress]fse_compress.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_fast.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.common]fse_decompress.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.compress]hist.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_double_fast.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.common]pool.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.common]xxhash.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_compress_sequences.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstd_compress_literals.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.decompress]zstd_ddict.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.decompress]zstd_decompress.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.decompress]huf_decompress.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.decompress]zstd_decompress_block.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+	/warn=disa=TOOFEWACTUALS -
+        [-.python-zstandard.zstd.compress]zstdmt_compress.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.dictBuilder]cover.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.dictBuilder]fastcover.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.dictBuilder]divsufsort.c
+$ cc/name=(short,as_is)-
+        /incl=("/python$root/include", "../../mercurial", -
+                "../python-zstandard/c-ext", "../python-zstandard/zstd", -
+                "../python-zstandard/zstd/dictBuilder", -
+                "../python-zstandard/zstd/common") -
+        [-.python-zstandard.zstd.dictBuilder]zdict.c
+$ 
+$ link/share=base85.exe sys$input/opt
+GSMATCH=lequal,1,1000
+case_sensitive=YES
+SYMBOL_VECTOR = (PyInit_base85=PROCEDURE)
+SYMBOL_VECTOR = (PYINIT_BASE85/PyInit_base85=PROCEDURE)
+base85.obj
+python$shr/share
+case_sensitive=NO
+$
+$ link/share=bdiff.exe sys$input/opt
+GSMATCH=lequal,1,1000
+case_sensitive=YES
+SYMBOL_VECTOR = (PyInit_bdiff=PROCEDURE)
+SYMBOL_VECTOR = (PYINIT_BDIFF/PyInit_bdiff=PROCEDURE)
+bdiff.obj
+bdiff-mod.obj
+xdiffi.obj
+xprepare.obj
+xutils.obj
+python$shr/share
+case_sensitive=NO
+$
+$ link/share=mpatch.exe sys$input/opt
+GSMATCH=lequal,1,1000
+case_sensitive=YES
+SYMBOL_VECTOR = (PyInit_mpatch=PROCEDURE)
+SYMBOL_VECTOR = (PYINIT_MPATCH/PyInit_mpatch=PROCEDURE)
+mpatch.obj
+mpatch-mod.obj
+python$shr/share
+case_sensitive=NO
+$
+$ link/share=osutil.exe sys$input/opt
+GSMATCH=lequal,1,1000
+case_sensitive=YES
+SYMBOL_VECTOR = (PyInit_osutil=PROCEDURE)
+SYMBOL_VECTOR = (PYINIT_OSUTIL/PyInit_osutil=PROCEDURE)
+osutil.obj
+python$shr/share
+case_sensitive=NO
+$
+$ link/share=parsers.exe sys$input/opt
+GSMATCH=lequal,1,1000
+case_sensitive=YES
+SYMBOL_VECTOR = (PyInit_parsers=PROCEDURE)
+SYMBOL_VECTOR = (PYINIT_PARSERS/PyInit_parsers=PROCEDURE)
+parsers.obj
+dirs.obj
+charencode.obj
+pathencode.obj
+revlog.obj
+manifest.obj
+python$shr/share
+case_sensitive=NO
+$
+$ link/share=zstd.exe sys$input/opt
+GSMATCH=lequal,1,1000
+case_sensitive=YES
+SYMBOL_VECTOR = (PyInit_zstd=PROCEDURE)
+SYMBOL_VECTOR = (PYINIT_ZSTD/PyInit_zstd=PROCEDURE)
+zstd.obj
+frameparams.obj
+decompressobj.obj
+zstd_common.obj
+compressionreader.obj
+compressionwriter.obj
+compressoriterator.obj
+zstd_compress.obj
+zstd_opt.obj
+zstd_lazy.obj
+huf_compress.obj
+entropy_common.obj
+fse_compress.obj
+fse_decompress.obj
+zstd_fast.obj
+zstd_ldm.obj
+hist.obj
+zstd_double_fast.obj
+zstd_compress_sequences.obj
+zstd_compress_literals.obj
+zstdmt_compress.obj
+compressiondict.obj
+zstd_ddict.obj
+zstd_decompress.obj
+zstd_decompress_block.obj
+zdict.obj
+huf_decompress.obj
+compressionparams.obj
+compressobj.obj
+decompressionreader.obj
+compressionchunker.obj
+decompressionwriter.obj
+decompressor.obj
+decompressoriterator.obj
+compressor.obj
+divsufsort.obj
+bufferutil.obj
+constants.obj
+error_private.obj
+cover.obj
+fastcover.obj
+pool.obj
+xxhash.obj
+python$shr/share
+case_sensitive=NO
+$
+$ delete/noconf *.obj;
+$ rename zstd.exe [--.mercurial]/log
+$ rename *.exe [--.mercurial.cext]/log
+$
+$ exit
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/openvms/vms/hgeditor.com	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,12 @@
+$!
+$! Call OpenVMS editor with a conversion from Unix filename syntax to OpenVMS syntax 
+$!
+$ set proc/par=extend
+$ ufile = p1
+$ tovms :== $ MERCURIAL_ROOT:[vms]tovms
+$ tovms 'ufile'
+$ vfile = tmpfn
+$ deassign sys$input
+$ edit 'vfile'
+$ purge/nolog 'vfile'
+$ exit
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/openvms/vms/hgmerge.com	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,76 @@
+$!
+$! Custom merge tool to help resolve merge conflicts on OpenVMS
+$! We recommend resolving conflicts on another system when possible
+$!
+$ set proc/par=extend
+$ mine = p1
+$ orig = p2
+$ theirs = p3
+$ tovms :== $ MERCURIAL_ROOT:[vms]tovms
+$ merged = p1 + ".hgmerge"
+$ tovms 'merged'
+$ merged = tmpfn
+$
+$ define DECC$UNIX_LEVEL 90
+$ gdiff3 :== $ MERCURIAL_ROOT:[vms]gdiff3
+$ gdiff == "$ MERCURIAL_ROOT:[VMS]gdiff"
+$! gdiff -u 'orig' 'mine'
+$! gdiff -u 'orig' 'theirs'
+$ if (f$search("''merged'") .nes. "") then -
+          delete 'merged';*
+$ define sys$output 'merged'
+$ gdiff3 -"L" mine -"L" original -"L" theirs -"E" -m 'mine' 'orig' 'theirs'
+$ status = $status
+$ deassign sys$output
+$ convert/fdl=mercurial_root:[vms]stmlf.fdl 'merged' 'merged'
+$ purge/nolog 'merged'
+$! No conflicts found.  Merge done.
+$ if status .eqs. "%X006C8009"
+$ then
+$   tovms 'p1'
+$   mine = tmpfn
+$   rename 'merged' 'mine'
+$   purge/nolog 'mine'
+$   write sys$output "Merged ''mine'"
+$   exit 1
+$ endif
+$
+$! In all other cases, diff3 has found conflicts, added the proper conflict
+$! markers to the merged file and we should now edit this file.  Fire up an
+$! editor with the merged file and let the user manually resolve the conflicts.
+$! When the editor exits successfully, there should be no conflict markers in
+$! the merged file, otherwise we consider this merge failed.
+$
+$ if status .eqs. "%X006C8013"
+$ then
+$   deassign sys$input
+$   edit 'merged'
+$   open fi 'merged'
+$   loop:
+$     read fi srec/end=endloop
+$     rec7 = f$extract(0, 7, srec)
+$     if rec7 .eqs. "<<<<<<<" then goto conflict
+$     if rec7 .eqs. "|||||||" then goto conflict
+$     if rec7 .eqs. "=======" then goto conflict
+$     if rec7 .eqs. ">>>>>>>" then goto conflict
+$     goto loop
+$   endloop:
+$   close fi
+$   tovms 'p1'
+$   mine = tmpfn
+$   rename 'merged' 'mine'
+$   purge/nolog 'mine'
+$   exit
+$ endif
+$ if (f$search("''merged'") .nes. "") then -
+          delete 'merged';*
+$ write sys$output "serious diff3 error while trying to merge ''mine'"
+$ exit 44
+$ 
+$ conflict:
+$ close fi
+$ if (f$search("''merged'") .nes. "") then -
+          delete 'merged';*
+$ write sys$output -
+ "conflict markers still found in the working-copy.  Merge aborted for ''mine'"
+$ exit 44
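
hgmerge.com accepts a merge only after re-reading the merged file and finding none of diff3's conflict markers in the first seven columns of any record. The same scan sketched in Python, assuming nothing beyond the DCL loop above (the function name is illustrative; the marker strings and the seven-character comparison mirror f$extract(0, 7, srec)):

    MARKERS = (b"<<<<<<<", b"|||||||", b"=======", b">>>>>>>")

    def has_conflict_markers(path):
        # Read record by record and compare the first seven characters
        # against each diff3 marker, as the DCL loop does.
        with open(path, "rb") as fh:
            for line in fh:
                if line[:7] in MARKERS:
                    return True
        return False
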
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/openvms/vms/logicals.com	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,20 @@
+$!
+$! Define mercurial_root logical 
+$!   p1: define parameter (/system for example)
+$!
+$ proc = f$environment("PROCEDURE")
+$ proc = f$parse(proc,"sys$disk:[]",,,"NO_CONCEAL")
+$ cur_dev = f$parse(proc,,,"DEVICE","SYNTAX_ONLY")
+$ cur_dir = f$parse(proc,,,"DIRECTORY","SYNTAX_ONLY")
+$ cur_dir = f$extract(1,f$length(cur_dir)-2,cur_dir)
+$ cur_dir = cur_dir - "["
+$ cur_dir = cur_dir - "]"
+$ cur_dir = cur_dir - "<"
+$ cur_dir = cur_dir - ">"
+$
+$! remove trailing .VMS
+$ root_dir = f$extract(0,f$length(cur_dir)-4,cur_dir)
+$
+$ define/nolog 'p1' /trans=concealed mercurial_root 'cur_dev'['root_dir'.]
+$
+$ exit
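
logicals.com derives the repository root from the procedure's own location: it parses out the device and directory, strips the brackets, removes the trailing .VMS component, and defines mercurial_root as a concealed rooted logical. The equivalent computation in POSIX terms, as an illustrative sketch only (the real script manipulates VMS directory syntax, not slashes):

    import os

    def mercurial_root(proc_path):
        # The procedure lives in <root>/vms/logicals.com; dropping the
        # final "vms" directory recovers the root, just as the DCL drops
        # the trailing ".VMS" from the directory string.
        vms_dir = os.path.dirname(os.path.abspath(proc_path))
        return os.path.dirname(vms_dir)
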
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/openvms/vms/setup.com	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,7 @@
+$!
+$! Set hg and hgeditor symbol
+$!
+$ HG == "$ PYTHON$ROOT:[BIN]PYTHON /MERCURIAL_ROOT/HG"
+$ HGEDITOR == "@MERCURIAL_ROOT:[VMS]HGEDITOR"
+$
+$ exit
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/openvms/vms/startup.com	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,12 @@
+$!
+$! Mercurial startup file
+$!
+$ proc = f$environment("PROCEDURE")
+$ cur_dev = f$parse(proc,,,"DEVICE","SYNTAX_ONLY")
+$ cur_dir = f$parse(proc,,,"DIRECTORY","SYNTAX_ONLY")
+$!
+$! Define logicals
+$!
+$ @'cur_dev''cur_dir'logicals "/system/exec"
+$
+$ exit
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/openvms/vms/stmlf.fdl	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,7 @@
+!
+! Used by hgmerge.com to convert file to stream_lf record format
+!
+RECORD
+        BLOCK_SPAN              yes
+        CARRIAGE_CONTROL        carriage_return
+        FORMAT                  stream_lf
--- a/contrib/perf.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/perf.py	Tue Nov 07 15:21:11 2023 +0100
@@ -456,7 +456,7 @@
         return functools.partial(stub_timer, fm), fm
 
     # experimental config: perf.all-timing
-    displayall = ui.configbool(b"perf", b"all-timing", False)
+    displayall = ui.configbool(b"perf", b"all-timing", True)
 
     # experimental config: perf.run-limits
     limitspec = ui.configlist(b"perf", b"run-limits", [])
@@ -3479,7 +3479,7 @@
 
     # get a formatter
     fm = ui.formatter(b'perf', opts)
-    displayall = ui.configbool(b"perf", b"all-timing", False)
+    displayall = ui.configbool(b"perf", b"all-timing", True)
 
     # print individual details if requested
     if opts['details']:
@@ -3549,7 +3549,10 @@
     timings = []
     tr = _faketr()
     with _temprevlog(ui, orig, startrev) as dest:
-        dest._lazydeltabase = lazydeltabase
+        if hasattr(dest, "delta_config"):
+            dest.delta_config.lazy_delta_base = lazydeltabase
+        else:
+            dest._lazydeltabase = lazydeltabase
         revs = list(orig.revs(startrev, stoprev))
         total = len(revs)
         topic = 'adding'
@@ -3717,11 +3720,15 @@
 
     rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
 
-    # _chunkraw was renamed to _getsegmentforrevs.
+    # - _chunkraw was renamed to _getsegmentforrevs
+    # - _getsegmentforrevs was moved to the inner object
     try:
-        segmentforrevs = rl._getsegmentforrevs
+        segmentforrevs = rl._inner.get_segment_for_revs
     except AttributeError:
-        segmentforrevs = rl._chunkraw
+        try:
+            segmentforrevs = rl._getsegmentforrevs
+        except AttributeError:
+            segmentforrevs = rl._chunkraw
 
     # Verify engines argument.
     if engines:
@@ -3744,62 +3751,101 @@
 
     revs = list(rl.revs(startrev, len(rl) - 1))
 
-    def rlfh(rl):
-        if rl._inline:
+    @contextlib.contextmanager
+    def reading(rl):
+        if getattr(rl, 'reading', None) is not None:
+            with rl.reading():
+                yield None
+        elif rl._inline:
             indexfile = getattr(rl, '_indexfile', None)
             if indexfile is None:
                 # compatibility with <= hg-5.8
                 indexfile = getattr(rl, 'indexfile')
-            return getsvfs(repo)(indexfile)
+            yield getsvfs(repo)(indexfile)
         else:
             datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
-            return getsvfs(repo)(datafile)
+            yield getsvfs(repo)(datafile)
+
+    if getattr(rl, 'reading', None) is not None:
+
+        @contextlib.contextmanager
+        def lazy_reading(rl):
+            with rl.reading():
+                yield
+
+    else:
+
+        @contextlib.contextmanager
+        def lazy_reading(rl):
+            yield
 
     def doread():
         rl.clearcaches()
         for rev in revs:
-            segmentforrevs(rev, rev)
+            with lazy_reading(rl):
+                segmentforrevs(rev, rev)
 
     def doreadcachedfh():
         rl.clearcaches()
-        fh = rlfh(rl)
-        for rev in revs:
-            segmentforrevs(rev, rev, df=fh)
+        with reading(rl) as fh:
+            if fh is not None:
+                for rev in revs:
+                    segmentforrevs(rev, rev, df=fh)
+            else:
+                for rev in revs:
+                    segmentforrevs(rev, rev)
 
     def doreadbatch():
         rl.clearcaches()
-        segmentforrevs(revs[0], revs[-1])
+        with lazy_reading(rl):
+            segmentforrevs(revs[0], revs[-1])
 
     def doreadbatchcachedfh():
         rl.clearcaches()
-        fh = rlfh(rl)
-        segmentforrevs(revs[0], revs[-1], df=fh)
+        with reading(rl) as fh:
+            if fh is not None:
+                segmentforrevs(revs[0], revs[-1], df=fh)
+            else:
+                segmentforrevs(revs[0], revs[-1])
 
     def dochunk():
         rl.clearcaches()
-        fh = rlfh(rl)
-        for rev in revs:
-            rl._chunk(rev, df=fh)
+        # chunk used to be available directly on the revlog
+        _chunk = getattr(rl, '_inner', rl)._chunk
+        with reading(rl) as fh:
+            if fh is not None:
+                for rev in revs:
+                    _chunk(rev, df=fh)
+            else:
+                for rev in revs:
+                    _chunk(rev)
 
     chunks = [None]
 
     def dochunkbatch():
         rl.clearcaches()
-        fh = rlfh(rl)
-        # Save chunks as a side-effect.
-        chunks[0] = rl._chunks(revs, df=fh)
+        _chunks = getattr(rl, '_inner', rl)._chunks
+        with reading(rl) as fh:
+            if fh is not None:
+                # Save chunks as a side-effect.
+                chunks[0] = _chunks(revs, df=fh)
+            else:
+                # Save chunks as a side-effect.
+                chunks[0] = _chunks(revs)
 
     def docompress(compressor):
         rl.clearcaches()
 
+        compressor_holder = getattr(rl, '_inner', rl)
+
         try:
             # Swap in the requested compression engine.
-            oldcompressor = rl._compressor
-            rl._compressor = compressor
+            oldcompressor = compressor_holder._compressor
+            compressor_holder._compressor = compressor
             for chunk in chunks[0]:
                 rl.compress(chunk)
         finally:
-            rl._compressor = oldcompressor
+            compressor_holder._compressor = oldcompressor
 
     benches = [
         (lambda: doread(), b'read'),
@@ -3857,13 +3903,29 @@
 
     # _chunkraw was renamed to _getsegmentforrevs.
     try:
-        segmentforrevs = r._getsegmentforrevs
+        segmentforrevs = r._inner.get_segment_for_revs
     except AttributeError:
-        segmentforrevs = r._chunkraw
+        try:
+            segmentforrevs = r._getsegmentforrevs
+        except AttributeError:
+            segmentforrevs = r._chunkraw
 
     node = r.lookup(rev)
     rev = r.rev(node)
 
+    if getattr(r, 'reading', None) is not None:
+
+        @contextlib.contextmanager
+        def lazy_reading(r):
+            with r.reading():
+                yield
+
+    else:
+
+        @contextlib.contextmanager
+        def lazy_reading(r):
+            yield
+
     def getrawchunks(data, chain):
         start = r.start
         length = r.length
@@ -3897,7 +3959,8 @@
         if not cache:
             r.clearcaches()
         for item in slicedchain:
-            segmentforrevs(item[0], item[-1])
+            with lazy_reading(r):
+                segmentforrevs(item[0], item[-1])
 
     def doslice(r, chain, size):
         for s in slicechunk(r, chain, targetsize=size):
@@ -3935,13 +3998,19 @@
 
     size = r.length(rev)
     chain = r._deltachain(rev)[0]
-    if not getattr(r, '_withsparseread', False):
+
+    with_sparse_read = False
+    if hasattr(r, 'data_config'):
+        with_sparse_read = r.data_config.with_sparse_read
+    elif hasattr(r, '_withsparseread'):
+        with_sparse_read = r._withsparseread
+    if not with_sparse_read:
         slicedchain = (chain,)
     else:
         slicedchain = tuple(slicechunk(r, chain, targetsize=size))
     data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
     rawchunks = getrawchunks(data, slicedchain)
-    bins = r._chunks(chain)
+    bins = r._inner._chunks(chain)
     text = bytes(bins[0])
     bins = bins[1:]
     text = mdiff.patches(text, bins)
@@ -3952,7 +4021,7 @@
         (lambda: doread(chain), b'read'),
     ]
 
-    if getattr(r, '_withsparseread', False):
+    if with_sparse_read:
         slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
         benches.append(slicing)
 
@@ -4541,7 +4610,8 @@
                 )
             return orig(repo, cmd, file_, opts)
 
-        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
+        name = _sysstr(b'openrevlog')
+        extensions.wrapfunction(cmdutil, name, openrevlog)
 
 
 @command(
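
Two compatibility patterns recur through these perf.py hunks: probing for the new configuration objects (delta_config, data_config) before falling back to the old underscore attributes, and entering the revlog's reading() context manager only when one exists. A condensed sketch of the second shim, assuming only what the hunks show (perf.py defines lazy_reading inline in each benchmark; factoring it into a helper here is editorial):

    import contextlib

    def make_lazy_reading(rl):
        # Newer revlogs require wrapping data access in rl.reading();
        # older ones have no such context manager, so fall back to a
        # no-op that keeps the benchmark bodies identical.
        if getattr(rl, 'reading', None) is not None:
            @contextlib.contextmanager
            def lazy_reading():
                with rl.reading():
                    yield
        else:
            @contextlib.contextmanager
            def lazy_reading():
                yield
        return lazy_reading
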
--- a/contrib/python3-whitelist	Mon Nov 06 15:38:27 2023 +0100
+++ b/contrib/python3-whitelist	Tue Nov 07 15:21:11 2023 +0100
@@ -337,9 +337,6 @@
 test-import.t
 test-imports-checker.t
 test-incoming-outgoing.t
-test-infinitepush-bundlestore.t
-test-infinitepush-ci.t
-test-infinitepush.t
 test-inherit-mode.t
 test-init.t
 test-install.t
--- a/hgext/absorb.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/absorb.py	Tue Nov 07 15:21:11 2023 +0100
@@ -299,7 +299,7 @@
         4. read results from "finalcontents", or call getfinalcontent
     """
 
-    def __init__(self, fctxs, path, ui=None, opts=None):
+    def __init__(self, fctxs, path, ui=None, **opts):
         """([fctx], ui or None) -> None
 
         fctxs should be linear, and sorted by topo order - oldest first.
@@ -308,7 +308,7 @@
         self.fctxs = fctxs
         self.path = path
         self.ui = ui or nullui()
-        self.opts = opts or {}
+        self.opts = opts
 
         # following fields are built from fctxs. they exist for perf reason
         self.contents = [f.data() for f in fctxs]
@@ -375,7 +375,7 @@
                     % (short(self.fctxs[idx].node()), a1, a2, len(blines))
                 )
             self.linelog.replacelines(rev, a1, a2, b1, b2)
-        if self.opts.get(b'edit_lines', False):
+        if self.opts.get('edit_lines', False):
             self.finalcontents = self._checkoutlinelogwithedits()
         else:
             self.finalcontents = self._checkoutlinelog()
@@ -668,7 +668,7 @@
         4. call commit, to commit changes to hg database
     """
 
-    def __init__(self, stack, ui=None, opts=None):
+    def __init__(self, stack, ui=None, **opts):
         """([ctx], ui or None) -> None
 
         stack: should be linear, and sorted by topo order - oldest first.
@@ -676,7 +676,7 @@
         """
         assert stack
         self.ui = ui or nullui()
-        self.opts = opts or {}
+        self.opts = opts
         self.stack = stack
         self.repo = stack[-1].repo().unfiltered()
 
@@ -696,7 +696,7 @@
         self.paths = []
         # but if --edit-lines is used, the user may want to edit files
         # even if they are not modified
-        editopt = self.opts.get(b'edit_lines')
+        editopt = self.opts.get('edit_lines')
         if not self.status.modified and editopt and match:
             interestingpaths = match.files()
         else:
@@ -720,7 +720,7 @@
                 continue
             seenfctxs.update(fctxs[1:])
             self.fctxmap[path] = ctx2fctx
-            fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
+            fstate = filefixupstate(fctxs, path, ui=self.ui, **self.opts)
             if fm is not None:
                 fm.startitem()
                 fm.plain(b'showing changes for ')
@@ -873,7 +873,7 @@
         # be slow. in absorb's case, no need to invalidate fsmonitorstate.
         noop = lambda: 0
         restore = noop
-        if util.safehasattr(dirstate, '_fsmonitorstate'):
+        if hasattr(dirstate, '_fsmonitorstate'):
             bak = dirstate._fsmonitorstate.invalidate
 
             def restore():
@@ -1009,7 +1009,7 @@
     return overlaycontext(memworkingcopy, ctx)
 
 
-def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
+def absorb(ui, repo, stack=None, targetctx=None, pats=None, **opts):
     """pick fixup chunks from targetctx, apply them to stack.
 
     if targetctx is None, the working copy context will be used.
@@ -1036,22 +1036,21 @@
         targetctx = repo[None]
     if pats is None:
         pats = ()
-    if opts is None:
-        opts = {}
-    state = fixupstate(stack, ui=ui, opts=opts)
-    matcher = scmutil.match(targetctx, pats, opts)
-    if opts.get(b'interactive'):
+
+    state = fixupstate(stack, ui=ui, **opts)
+    matcher = scmutil.match(targetctx, pats, pycompat.byteskwargs(opts))
+    if opts.get('interactive'):
         diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
         origchunks = patch.parsepatch(diff)
         chunks = cmdutil.recordfilter(ui, origchunks, matcher)[0]
         targetctx = overlaydiffcontext(stack[-1], chunks)
-    if opts.get(b'edit_lines'):
+    if opts.get('edit_lines'):
         # If we're going to open the editor, don't ask the user to confirm
         # first
-        opts[b'apply_changes'] = True
+        opts['apply_changes'] = True
     fm = None
-    if opts.get(b'print_changes') or not opts.get(b'apply_changes'):
-        fm = ui.formatter(b'absorb', opts)
+    if opts.get('print_changes') or not opts.get('apply_changes'):
+        fm = ui.formatter(b'absorb', pycompat.byteskwargs(opts))
     state.diffwith(targetctx, matcher, fm)
     if fm is not None:
         fm.startitem()
@@ -1074,9 +1073,9 @@
                 label=b'absorb.description',
             )
         fm.end()
-    if not opts.get(b'dry_run'):
+    if not opts.get('dry_run'):
         if (
-            not opts.get(b'apply_changes')
+            not opts.get('apply_changes')
             and state.ctxaffected
             and ui.promptchoice(
                 b"apply changes (y/N)? $$ &Yes $$ &No", default=1
@@ -1154,12 +1153,10 @@
 
     Returns 0 on success, 1 if all chunks were ignored and nothing amended.
     """
-    opts = pycompat.byteskwargs(opts)
-
     with repo.wlock(), repo.lock():
-        if not opts[b'dry_run']:
+        if not opts['dry_run']:
             cmdutil.checkunfinished(repo)
 
-        state = absorb(ui, repo, pats=pats, opts=opts)
+        state = absorb(ui, repo, pats=pats, **opts)
         if sum(s[0] for s in state.chunkstats.values()) == 0:
             return 1
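
The absorb hunks show the tree-wide kwargs migration in miniature: commands keep **opts with native str keys end to end and call pycompat.byteskwargs() only at the boundary of APIs that still expect bytes keys, such as scmutil.match() and ui.formatter(). A minimal sketch of the convention (command_impl is a placeholder name; the two boundary calls are taken from the hunks above):

    from mercurial import pycompat, scmutil

    def command_impl(ui, repo, *pats, **opts):
        # Native str keys inside the command's own logic...
        dry_run = opts.get('dry_run', False)
        # ...and a bytes-keyed copy only where legacy APIs require it.
        matcher = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
        fm = ui.formatter(b'absorb', pycompat.byteskwargs(opts))
        return dry_run, matcher, fm
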
--- a/hgext/automv.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/automv.py	Tue Nov 07 15:21:11 2023 +0100
@@ -56,9 +56,8 @@
 
 def mvcheck(orig, ui, repo, *pats, **opts):
     """Hook to check for moves at commit time"""
-    opts = pycompat.byteskwargs(opts)
     renames = None
-    disabled = opts.pop(b'no_automv', False)
+    disabled = opts.pop('no_automv', False)
     with repo.wlock():
         if not disabled:
             threshold = ui.configint(b'automv', b'similarity')
@@ -67,7 +66,9 @@
                     _(b'automv.similarity must be between 0 and 100')
                 )
             if threshold > 0:
-                match = scmutil.match(repo[None], pats, opts)
+                match = scmutil.match(
+                    repo[None], pats, pycompat.byteskwargs(opts)
+                )
                 added, removed = _interestingfiles(repo, match)
                 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
                 renames = _findrenames(
@@ -82,7 +83,7 @@
                 # current extension structure, and this is not worse than what
                 # happened before.
                 scmutil._markchanges(repo, (), (), renames)
-        return orig(ui, repo, *pats, **pycompat.strkwargs(opts))
+        return orig(ui, repo, *pats, **opts)
 
 
 def _interestingfiles(repo, matcher):
--- a/hgext/beautifygraph.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/beautifygraph.py	Tue Nov 07 15:21:11 2023 +0100
@@ -103,5 +103,5 @@
         )
         return
 
-    extensions.wrapfunction(graphmod, b'outputgraph', outputprettygraph)
-    extensions.wrapfunction(templatekw, b'getgraphnode', getprettygraphnode)
+    extensions.wrapfunction(graphmod, 'outputgraph', outputprettygraph)
+    extensions.wrapfunction(templatekw, 'getgraphnode', getprettygraphnode)
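
The beautifygraph hunk above (and the bookflow one below) make the same change: extensions.wrapfunction() now takes the attribute name as a native str rather than bytes, since the name is ultimately resolved with getattr()/setattr() on the wrapped module. A minimal sketch of the wrapping pattern (the wrapper body is illustrative; module and function names come from the hunk above):

    from mercurial import extensions, graphmod

    def outputprettygraph(orig, *args, **kwargs):
        # A wrapfunction() wrapper receives the original callable first
        # and is responsible for invoking it.
        return orig(*args, **kwargs)

    def uisetup(ui):
        extensions.wrapfunction(graphmod, 'outputgraph', outputprettygraph)
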
--- a/hgext/blackbox.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/blackbox.py	Tue Nov 07 15:21:11 2023 +0100
@@ -67,48 +67,6 @@
 cmdtable = {}
 command = registrar.command(cmdtable)
 
-configtable = {}
-configitem = registrar.configitem(configtable)
-
-configitem(
-    b'blackbox',
-    b'dirty',
-    default=False,
-)
-configitem(
-    b'blackbox',
-    b'maxsize',
-    default=b'1 MB',
-)
-configitem(
-    b'blackbox',
-    b'logsource',
-    default=False,
-)
-configitem(
-    b'blackbox',
-    b'maxfiles',
-    default=7,
-)
-configitem(
-    b'blackbox',
-    b'track',
-    default=lambda: [b'*'],
-)
-# Debug config option that also display the blackbox output on stderr
-# (in addition to writing it to disk)
-configitem(
-    b'blackbox',
-    b'debug.to-stderr',
-    default=False,
-)
-configitem(
-    b'blackbox',
-    b'ignore',
-    default=lambda: [b'chgserver', b'cmdserver', b'extension'],
-)
-configitem(b'blackbox', b'date-format', default=b'')
-
 _lastlogger = loggingutil.proxylogger()
 
 
--- a/hgext/bookflow.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/bookflow.py	Tue Nov 07 15:21:11 2023 +0100
@@ -117,8 +117,8 @@
 
 
 def uisetup(ui):
-    extensions.wrapfunction(bookmarks, b'update', bookmarks_update)
-    extensions.wrapfunction(bookmarks, b'addbookmarks', bookmarks_addbookmarks)
+    extensions.wrapfunction(bookmarks, 'update', bookmarks_update)
+    extensions.wrapfunction(bookmarks, 'addbookmarks', bookmarks_addbookmarks)
     extensions.wrapcommand(commands.table, b'commit', commands_commit)
     extensions.wrapcommand(commands.table, b'pull', commands_pull)
     if not ui.configbool(MY_NAME, b'enable-branches'):
--- a/hgext/bugzilla.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/bugzilla.py	Tue Nov 07 15:21:11 2023 +0100
@@ -766,13 +766,13 @@
 # inheritance with a new-style class.
 class cookietransport(cookietransportrequest, xmlrpclib.Transport):
     def __init__(self, use_datetime=0):
-        if util.safehasattr(xmlrpclib.Transport, "__init__"):
+        if hasattr(xmlrpclib.Transport, "__init__"):
             xmlrpclib.Transport.__init__(self, use_datetime)
 
 
 class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
     def __init__(self, use_datetime=0):
-        if util.safehasattr(xmlrpclib.Transport, "__init__"):
+        if hasattr(xmlrpclib.Transport, "__init__"):
             xmlrpclib.SafeTransport.__init__(self, use_datetime)
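
util.safehasattr() existed because Python 2's hasattr() swallowed every
exception raised during the attribute lookup. Python 3's builtin only
catches AttributeError, which is exactly what safehasattr() reimplemented,
so the two are now interchangeable. A small standalone illustration:

    class Demo:
        @property
        def flaky(self):
            raise RuntimeError('lookup blew up')

    d = Demo()
    print(hasattr(d, 'missing'))  # False: AttributeError is caught

    try:
        hasattr(d, 'flaky')       # on Python 3, the RuntimeError propagates
    except RuntimeError:
        print('not swallowed')
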
 
 
--- a/hgext/children.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/children.py	Tue Nov 07 15:21:11 2023 +0100
@@ -67,8 +67,7 @@
     See :hg:`help log` and :hg:`help revsets.children`.
 
     """
-    opts = pycompat.byteskwargs(opts)
-    rev = opts.get(b'rev')
+    rev = opts.get('rev')
     ctx = logcmdutil.revsingle(repo, rev)
     if file_:
         fctx = repo.filectx(file_, changeid=ctx.rev())
@@ -76,7 +75,9 @@
     else:
         childctxs = ctx.children()
 
-    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+    displayer = logcmdutil.changesetdisplayer(
+        ui, repo, pycompat.byteskwargs(opts)
+    )
     for cctx in childctxs:
         displayer.show(cctx)
     displayer.close()
--- a/hgext/churn.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/churn.py	Tue Nov 07 15:21:11 2023 +0100
@@ -52,18 +52,17 @@
 
 def countrate(ui, repo, amap, *pats, **opts):
     """Calculate stats"""
-    opts = pycompat.byteskwargs(opts)
-    if opts.get(b'dateformat'):
+    if opts.get('dateformat'):
 
         def getkey(ctx):
             t, tz = ctx.date()
             date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
             return encoding.strtolocal(
-                date.strftime(encoding.strfromlocal(opts[b'dateformat']))
+                date.strftime(encoding.strfromlocal(opts['dateformat']))
             )
 
     else:
-        tmpl = opts.get(b'oldtemplate') or opts.get(b'template')
+        tmpl = opts.get('oldtemplate') or opts.get('template')
         tmpl = logcmdutil.maketemplater(ui, repo, tmpl)
 
         def getkey(ctx):
@@ -80,7 +79,7 @@
         rev = ctx.rev()
         key = getkey(ctx).strip()
         key = amap.get(key, key)  # alias remap
-        if opts.get(b'changesets'):
+        if opts.get('changesets'):
             rate[key] = (rate.get(key, (0,))[0] + 1, 0)
         else:
             parents = ctx.parents()
@@ -96,11 +95,11 @@
 
     wopts = logcmdutil.walkopts(
         pats=pats,
-        opts=opts,
-        revspec=opts[b'rev'],
-        date=opts[b'date'],
-        include_pats=opts[b'include'],
-        exclude_pats=opts[b'exclude'],
+        opts=pycompat.byteskwargs(opts),
+        revspec=opts['rev'],
+        date=opts['date'],
+        include_pats=opts['include'],
+        exclude_pats=opts['exclude'],
     )
     revs, makefilematcher = logcmdutil.makewalker(repo, wopts)
     for ctx in scmutil.walkchangerevs(repo, revs, makefilematcher, prep):
--- a/hgext/clonebundles.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/clonebundles.py	Tue Nov 07 15:21:11 2023 +0100
@@ -349,7 +349,7 @@
 
 
 def extsetup(ui):
-    extensions.wrapfunction(wireprotov1server, b'_capabilities', capabilities)
+    extensions.wrapfunction(wireprotov1server, '_capabilities', capabilities)
 
 
 # logic for bundle auto-generation
@@ -987,7 +987,7 @@
         @localrepo.unfilteredmethod
         def clonebundles_lock(self, wait=True):
             '''Lock the repository file related to clone bundles'''
-            if not util.safehasattr(self, '_cb_lock_ref'):
+            if not hasattr(self, '_cb_lock_ref'):
                 self._cb_lock_ref = None
             l = self._currentlock(self._cb_lock_ref)
             if l is not None:
--- a/hgext/closehead.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/closehead.py	Tue Nov 07 15:21:11 2023 +0100
@@ -54,19 +54,16 @@
             text=message,
             files=[],
             filectxfn=None,
-            user=opts.get(b'user'),
-            date=opts.get(b'date'),
+            user=opts.get('user'),
+            date=opts.get('date'),
             extra=extra,
         )
-        tr = repo.transaction(b'commit')
-        ret = repo.commitctx(cctx, True)
-        bookmarks.update(repo, [rev, None], ret)
-        cctx.markcommitted(ret)
-        tr.close()
+        with repo.transaction(b'commit'):
+            ret = repo.commitctx(cctx, True)
+            bookmarks.update(repo, [rev, None], ret)
+            cctx.markcommitted(ret)
 
-    opts = pycompat.byteskwargs(opts)
-
-    revs += tuple(opts.get(b'rev', []))
+    revs += tuple(opts.get('rev', []))
     revs = logcmdutil.revrange(repo, revs)
 
     if not revs:
@@ -80,7 +77,7 @@
         if rev not in heads:
             raise error.Abort(_(b'revision is not an open head: %d') % rev)
 
-    message = cmdutil.logmessage(ui, opts)
+    message = cmdutil.logmessage(ui, pycompat.byteskwargs(opts))
     if not message:
         raise error.Abort(_(b"no commit message specified with -l or -m"))
     extra = {b'close': b'1'}
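
The with-statement form used above is equivalent to the old open-coded
sequence on the happy path, but it also releases (and thereby aborts) the
transaction when commitctx() raises, where the previous code would have
leaked it. Schematically, in the context of the command above:

    # old: the transaction is only closed on the happy path
    tr = repo.transaction(b'commit')
    ret = repo.commitctx(cctx, True)
    tr.close()  # never reached if commitctx() raises

    # new: __exit__ closes on success and releases/aborts on error
    with repo.transaction(b'commit'):
        ret = repo.commitctx(cctx, True)
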
--- a/hgext/commitextras.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/commitextras.py	Tue Nov 07 15:21:11 2023 +0100
@@ -16,7 +16,6 @@
     error,
     extensions,
     registrar,
-    util,
 )
 
 cmdtable = {}
@@ -52,7 +51,7 @@
 
 
 def _commit(orig, ui, repo, *pats, **opts):
-    if util.safehasattr(repo, 'unfiltered'):
+    if hasattr(repo, 'unfiltered'):
         repo = repo.unfiltered()
 
     class repoextra(repo.__class__):
--- a/hgext/convert/convcmd.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/convert/convcmd.py	Tue Nov 07 15:21:11 2023 +0100
@@ -435,7 +435,13 @@
             """Sort revisions by date."""
 
             def getdate(n):
-                return dateutil.parsedate(self.commitcache[n].date)
+                commit = self.commitcache[n]
+                # The other entries are here as tie-breakers for stability
+                return (
+                    dateutil.parsedate(commit.date),
+                    commit.rev,
+                    commit.branch,
+                )
 
             return keysorter(getdate)
 
--- a/hgext/convert/cvs.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/convert/cvs.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,7 +12,6 @@
 
 from mercurial.i18n import _
 from mercurial.pycompat import (
-    getattr,
     open,
 )
 from mercurial import (
--- a/hgext/convert/cvsps.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/convert/cvsps.py	Tue Nov 07 15:21:11 2023 +0100
@@ -198,9 +198,9 @@
             oldlog = pickle.load(open(cachefile, b'rb'))
             for e in oldlog:
                 if not (
-                    util.safehasattr(e, b'branchpoints')
-                    and util.safehasattr(e, b'commitid')
-                    and util.safehasattr(e, b'mergepoint')
+                    hasattr(e, b'branchpoints')
+                    and hasattr(e, b'commitid')
+                    and hasattr(e, b'mergepoint')
                 ):
                     ui.status(_(b'ignoring old cache\n'))
                     oldlog = []
--- a/hgext/convert/hg.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/convert/hg.py	Tue Nov 07 15:21:11 2023 +0100
@@ -298,8 +298,9 @@
         parents = pl
         nparents = len(parents)
         if self.filemapmode and nparents == 1:
-            m1node = self.repo.changelog.read(bin(parents[0]))[0]
             parent = parents[0]
+            p1_node = bin(parent)
+            m1node = self.repo.changelog.changelogrevision(p1_node).manifest
 
         if len(parents) < 2:
             parents.append(self.repo.nullid)
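
Both spellings fetch the manifest node of the first parent; the new one
names the field instead of relying on the position of entries in the tuple
returned by changelog.read(). In the context of the method above:

    # old: positional field 0 of the changelog entry tuple
    m1node = self.repo.changelog.read(bin(parents[0]))[0]
    # new: the same manifest node, accessed by name
    m1node = self.repo.changelog.changelogrevision(p1_node).manifest
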
--- a/hgext/convert/transport.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/convert/transport.py	Tue Nov 07 15:21:11 2023 +0100
@@ -27,9 +27,6 @@
 Pool = svn.core.Pool
 SubversionException = svn.core.SubversionException
 
-from mercurial.pycompat import getattr
-from mercurial import util
-
 # Some older versions of the Python bindings need to be
 # explicitly initialized. But what we want to do probably
 # won't work worth a darn against those libraries anyway!
@@ -63,7 +60,7 @@
                 if p:
                     providers.append(p)
     else:
-        if util.safehasattr(svn.client, b'get_windows_simple_provider'):
+        if hasattr(svn.client, 'get_windows_simple_provider'):
             providers.append(svn.client.get_windows_simple_provider(pool))
 
     return svn.core.svn_auth_open(providers, pool)
@@ -85,7 +82,7 @@
         self.password = b''
 
         # Only Subversion 1.4 has reparent()
-        if ra is None or not util.safehasattr(svn.ra, b'reparent'):
+        if ra is None or not hasattr(svn.ra, 'reparent'):
             self.client = svn.client.create_context(self.pool)
             ab = _create_auth_baton(self.pool)
             self.client.auth_baton = ab
--- a/hgext/factotum.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/factotum.py	Tue Nov 07 15:21:11 2023 +0100
@@ -48,7 +48,6 @@
 
 import os
 from mercurial.i18n import _
-from mercurial.pycompat import setattr
 from mercurial.utils import procutil
 from mercurial import (
     error,
--- a/hgext/fastannotate/commands.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fastannotate/commands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -19,7 +19,6 @@
     pycompat,
     registrar,
     scmutil,
-    util,
 )
 
 from . import (
@@ -218,7 +217,7 @@
     paths = list(_matchpaths(repo, rev, pats, opts, aopts))
 
     # for client, prefetch from the server
-    if util.safehasattr(repo, 'prefetchfastannotate'):
+    if hasattr(repo, 'prefetchfastannotate'):
         repo.prefetchfastannotate(paths)
 
     for path in paths:
@@ -273,7 +272,7 @@
 
     # check if we need to do prefetch (client-side)
     rev = opts.get('rev')
-    if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
+    if hasattr(repo, 'prefetchfastannotate') and rev is not None:
         paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts)))
         repo.prefetchfastannotate(paths)
 
@@ -320,7 +319,7 @@
     ctx = logcmdutil.revsingle(repo, rev)
     m = scmutil.match(ctx, pats, opts)
     paths = list(ctx.walk(m))
-    if util.safehasattr(repo, 'prefetchfastannotate'):
+    if hasattr(repo, 'prefetchfastannotate'):
         # client
         if opts.get(b'REV'):
             raise error.Abort(_(b'--rev cannot be used for client'))
--- a/hgext/fastannotate/context.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fastannotate/context.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,9 +12,7 @@
 
 from mercurial.i18n import _
 from mercurial.pycompat import (
-    getattr,
     open,
-    setattr,
 )
 from mercurial.node import (
     bin,
@@ -151,7 +149,10 @@
 
 def hashdiffopts(diffopts):
     diffoptstr = stringutil.pprint(
-        sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults)
+        sorted(
+            (k, getattr(diffopts, pycompat.sysstr(k)))
+            for k in mdiff.diffopts.defaults
+        )
     )
     return hex(hashutil.sha1(diffoptstr).digest())[:6]
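
mdiff.diffopts.defaults is still keyed by bytes, while the builtin getattr()
needs a native str attribute name, hence the pycompat.sysstr() conversion.
A standalone sketch of the same access pattern:

    from mercurial import mdiff, pycompat

    diffopts = mdiff.diffopts()  # all defaults
    pairs = sorted(
        (k, getattr(diffopts, pycompat.sysstr(k)))  # sysstr(): bytes -> str
        for k in mdiff.diffopts.defaults            # keys are bytes
    )
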
 
@@ -167,13 +168,12 @@
     """
 
     defaults = {
-        b'diffopts': None,
-        b'followrename': True,
-        b'followmerge': True,
+        'diffopts': None,
+        'followrename': True,
+        'followmerge': True,
     }
 
     def __init__(self, **opts):
-        opts = pycompat.byteskwargs(opts)
         for k, v in self.defaults.items():
             setattr(self, k, opts.get(k, v))
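
With native str keys in defaults, the options can be applied with the
builtin setattr() directly and the byteskwargs() round-trip disappears. A
condensed, standalone version of the pattern:

    class annotateopts:
        defaults = {'diffopts': None, 'followrename': True, 'followmerge': True}

        def __init__(self, **opts):
            for k, v in self.defaults.items():
                setattr(self, k, opts.get(k, v))

    aopts = annotateopts(followmerge=False)
    print(aopts.followrename, aopts.followmerge)  # True False
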
 
@@ -322,7 +322,7 @@
                     b'(resolved fctx: %s)\n'
                     % (
                         self.path,
-                        stringutil.pprint(util.safehasattr(revfctx, b'node')),
+                        stringutil.pprint(hasattr(revfctx, 'node')),
                     )
                 )
             return self.annotatedirectly(revfctx, showpath, showlines)
--- a/hgext/fastannotate/protocol.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fastannotate/protocol.py	Tue Nov 07 15:21:11 2023 +0100
@@ -101,7 +101,7 @@
 
 def serveruisetup(ui):
     _registerwireprotocommand()
-    extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
+    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
 
 
 # client-side
--- a/hgext/fastannotate/support.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fastannotate/support.py	Tue Nov 07 15:21:11 2023 +0100
@@ -6,7 +6,6 @@
 # GNU General Public License version 2 or any later version.
 
 
-from mercurial.pycompat import getattr
 from mercurial import (
     context as hgcontext,
     dagop,
@@ -129,8 +128,8 @@
 
 
 def replacehgwebannotate():
-    extensions.wrapfunction(hgweb.webutil, b'annotate', _hgwebannotate)
+    extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
 
 
 def replacefctxannotate():
-    extensions.wrapfunction(hgcontext.basefilectx, b'annotate', _fctxannotate)
+    extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
--- a/hgext/fastexport.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fastexport.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,6 @@
 from mercurial import (
     error,
     logcmdutil,
-    pycompat,
     registrar,
     scmutil,
 )
@@ -46,9 +45,9 @@
             % rev
         )
     if user_person:
-        return b'"' + user_person + b'" <' + user_email + b'>'
+        return b'"%s" <%s>' % (user_person, user_email)
     else:
-        return b"<" + user_email + b">"
+        return b"<%s>" % user_email
 
 
 def convert_to_git_date(date):
@@ -176,22 +175,20 @@
     It can be piped into corresponding import routines like "git fast-import".
     Incremental dumps can be created by using marks files.
     """
-    opts = pycompat.byteskwargs(opts)
-
-    revs += tuple(opts.get(b"rev", []))
+    revs += tuple(opts.get("rev", []))
     if not revs:
         revs = scmutil.revrange(repo, [b":"])
     else:
         revs = logcmdutil.revrange(repo, revs)
     if not revs:
         raise error.Abort(_(b"no revisions matched"))
-    authorfile = opts.get(b"authormap")
+    authorfile = opts.get("authormap")
     if authorfile:
         authormap = convcmd.readauthormap(ui, authorfile)
     else:
         authormap = {}
 
-    import_marks = opts.get(b"import_marks")
+    import_marks = opts.get("import_marks")
     marks = {}
     if import_marks:
         with open(import_marks, "rb") as import_marks_file:
@@ -209,7 +206,7 @@
             export_commit(ui, repo, rev, marks, authormap)
             progress.increment()
 
-    export_marks = opts.get(b"export_marks")
+    export_marks = opts.get("export_marks")
     if export_marks:
         with open(export_marks, "wb") as export_marks_file:
             output_marks = [None] * len(marks)
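
The author-formatting change earlier in this file relies on bytes
%-formatting (PEP 461, Python 3.5+), which replaces the chains of b'...'
concatenations. For example:

    user_person, user_email = b'Jane Doe', b'jane@example.com'
    print(b'"%s" <%s>' % (user_person, user_email))
    # b'"Jane Doe" <jane@example.com>'
    print(b'<%s>' % user_email)  # b'<jane@example.com>'
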
--- a/hgext/fetch.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fetch.py	Tue Nov 07 15:21:11 2023 +0100
@@ -74,10 +74,9 @@
     Returns 0 on success.
     """
 
-    opts = pycompat.byteskwargs(opts)
-    date = opts.get(b'date')
+    date = opts.get('date')
     if date:
-        opts[b'date'] = dateutil.parsedate(date)
+        opts['date'] = dateutil.parsedate(date)
 
     parent = repo.dirstate.p1()
     branch = repo.dirstate.branch()
@@ -109,12 +108,12 @@
             )
 
         path = urlutil.get_unique_pull_path_obj(b'fetch', ui, source)
-        other = hg.peer(repo, opts, path)
+        other = hg.peer(repo, pycompat.byteskwargs(opts), path)
         ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
         revs = None
-        if opts[b'rev']:
+        if opts['rev']:
             try:
-                revs = [other.lookup(rev) for rev in opts[b'rev']]
+                revs = [other.lookup(rev) for rev in opts['rev']]
             except error.CapabilityError:
                 err = _(
                     b"other repository doesn't support revision lookup, "
@@ -162,7 +161,7 @@
             # By default, we consider the repository we're pulling
             # *from* as authoritative, so we merge our changes into
             # theirs.
-            if opts[b'switch_parent']:
+            if opts['switch_parent']:
                 firstparent, secondparent = newparent, newheads[0]
             else:
                 firstparent, secondparent = newheads[0], newparent
@@ -179,14 +178,12 @@
 
         if not err:
             # we don't translate commit messages
-            message = cmdutil.logmessage(ui, opts) or (
+            message = cmdutil.logmessage(ui, pycompat.byteskwargs(opts)) or (
                 b'Automated merge with %s' % urlutil.removeauth(other.url())
             )
-            editopt = opts.get(b'edit') or opts.get(b'force_editor')
+            editopt = opts.get('edit') or opts.get('force_editor')
             editor = cmdutil.getcommiteditor(edit=editopt, editform=b'fetch')
-            n = repo.commit(
-                message, opts[b'user'], opts[b'date'], editor=editor
-            )
+            n = repo.commit(message, opts['user'], opts['date'], editor=editor)
             ui.status(
                 _(b'new changeset %d:%s merges remote changes with local\n')
                 % (repo.changelog.rev(n), short(n))
--- a/hgext/fsmonitor/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fsmonitor/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -332,7 +332,7 @@
     matchfn = match.matchfn
     matchalways = match.always()
     dmap = self._map
-    if util.safehasattr(dmap, b'_map'):
+    if hasattr(dmap, '_map'):
         # for better performance, directly access the inner dirstate map if the
         # standard dirstate implementation is in use.
         dmap = dmap._map
@@ -744,7 +744,7 @@
 def wrapdirstate(orig, self):
     ds = orig(self)
     # only override the dirstate when Watchman is available for the repo
-    if util.safehasattr(self, b'_fsmonitorstate'):
+    if hasattr(self, '_fsmonitorstate'):
         makedirstate(self, ds)
     return ds
 
@@ -755,9 +755,9 @@
     )
     if pycompat.isdarwin:
         # An assist for avoiding the dangling-symlink fsevents bug
-        extensions.wrapfunction(os, b'symlink', wrapsymlink)
+        extensions.wrapfunction(os, 'symlink', wrapsymlink)
 
-    extensions.wrapfunction(merge, b'_update', wrapupdate)
+    extensions.wrapfunction(merge, '_update', wrapupdate)
 
 
 def wrapsymlink(orig, source, link_name):
@@ -811,7 +811,7 @@
             self.oldnode = self.repo[b'.'].node()
 
         if self.repo.currentwlock() is None:
-            if util.safehasattr(self.repo, b'wlocknostateupdate'):
+            if hasattr(self.repo, 'wlocknostateupdate'):
                 self._lock = self.repo.wlocknostateupdate()
             else:
                 self._lock = self.repo.wlock()
@@ -839,7 +839,7 @@
                 self._lock.release()
 
     def _state(self, cmd, commithash, status=b'ok'):
-        if not util.safehasattr(self.repo, b'_watchmanclient'):
+        if not hasattr(self.repo, '_watchmanclient'):
             return False
         try:
             self.repo._watchmanclient.command(
--- a/hgext/fsmonitor/watchmanclient.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/fsmonitor/watchmanclient.py	Tue Nov 07 15:21:11 2023 +0100
@@ -69,7 +69,7 @@
 
     def getcurrentclock(self):
         result = self.command(b'clock')
-        if not util.safehasattr(result, 'clock'):
+        if not hasattr(result, 'clock'):
             raise Unavailable(
                 b'clock result is missing clock value', invalidate=True
             )
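
Note that the builtin hasattr() requires a native str name: passing bytes
raises TypeError instead of returning False, which is why the attribute
names in these calls must be str literals. A quick illustration:

    class C:
        pass

    print(hasattr(C(), 'clock'))  # False
    try:
        hasattr(C(), b'clock')    # TypeError: attribute name must be string
    except TypeError as exc:
        print(exc)
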
--- a/hgext/git/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/git/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -342,8 +342,8 @@
 
 
 def extsetup(ui):
-    extensions.wrapfunction(localrepo, b'makestore', _makestore)
-    extensions.wrapfunction(localrepo, b'makefilestorage', _makefilestorage)
+    extensions.wrapfunction(localrepo, 'makestore', _makestore)
+    extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)
     # Inject --git flag for `hg init`
     entry = extensions.wrapcommand(commands.table, b'init', init)
     entry[1].extend(
--- a/hgext/git/dirstate.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/git/dirstate.py	Tue Nov 07 15:21:11 2023 +0100
@@ -47,7 +47,7 @@
     return result, warnings
 
 
-extensions.wrapfunction(matchmod, b'readpatternfile', readpatternfile)
+extensions.wrapfunction(matchmod, 'readpatternfile', readpatternfile)
 
 
 _STATUS_MAP = {}
--- a/hgext/gpg.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/gpg.py	Tue Nov 07 15:21:11 2023 +0100
@@ -301,13 +301,13 @@
 
 def _dosign(ui, repo, *revs, **opts):
     mygpg = newgpg(ui, **opts)
-    opts = pycompat.byteskwargs(opts)
+
     sigver = b"0"
     sigmessage = b""
 
-    date = opts.get(b'date')
+    date = opts.get('date')
     if date:
-        opts[b'date'] = dateutil.parsedate(date)
+        opts['date'] = dateutil.parsedate(date)
 
     if revs:
         nodes = [repo.lookup(n) for n in revs]
@@ -335,42 +335,39 @@
         sigmessage += b"%s %s %s\n" % (hexnode, sigver, sig)
 
     # write it
-    if opts[b'local']:
+    if opts['local']:
         repo.vfs.append(b"localsigs", sigmessage)
         return
 
     msigs = match.exact([b'.hgsigs'])
 
-    if not opts[b"force"]:
+    if not opts["force"]:
         if any(repo.status(match=msigs, unknown=True, ignored=True)):
             raise error.Abort(
                 _(b"working copy of .hgsigs is changed "),
                 hint=_(b"please commit .hgsigs manually"),
             )
 
-    sigsfile = repo.wvfs(b".hgsigs", b"ab")
-    sigsfile.write(sigmessage)
-    sigsfile.close()
+    with repo.wvfs(b".hgsigs", b"ab") as sigsfile:
+        sigsfile.write(sigmessage)
 
     if b'.hgsigs' not in repo.dirstate:
         with repo.dirstate.changing_files(repo):
             repo[None].add([b".hgsigs"])
 
-    if opts[b"no_commit"]:
+    if opts["no_commit"]:
         return
 
-    message = opts[b'message']
+    message = opts['message']
     if not message:
         # we don't translate commit messages
         message = b"\n".join(
             [b"Added signature for changeset %s" % short(n) for n in nodes]
         )
     try:
-        editor = cmdutil.getcommiteditor(
-            editform=b'gpg.sign', **pycompat.strkwargs(opts)
-        )
+        editor = cmdutil.getcommiteditor(editform=b'gpg.sign', **opts)
         repo.commit(
-            message, opts[b'user'], opts[b'date'], match=msigs, editor=editor
+            message, opts['user'], opts['date'], match=msigs, editor=editor
         )
     except ValueError as inst:
         raise error.Abort(pycompat.bytestr(inst))
--- a/hgext/highlight/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/highlight/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -101,8 +101,8 @@
 def extsetup(ui):
     # monkeypatch in the new version
     extensions.wrapfunction(
-        webcommands, b'_filerevision', filerevision_highlight
+        webcommands, '_filerevision', filerevision_highlight
     )
-    extensions.wrapfunction(webcommands, b'annotate', annotate_highlight)
+    extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
     webcommands.highlightcss = generate_css
     webcommands.__all__.append(b'highlightcss')
--- a/hgext/histedit.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/histedit.py	Tue Nov 07 15:21:11 2023 +0100
@@ -207,7 +207,6 @@
 
 from mercurial.i18n import _
 from mercurial.pycompat import (
-    getattr,
     open,
 )
 from mercurial.node import (
@@ -2652,7 +2651,7 @@
     return orig(ui, repo, nodelist, *args, **kwargs)
 
 
-extensions.wrapfunction(repair, b'strip', stripwrapper)
+extensions.wrapfunction(repair, 'strip', stripwrapper)
 
 
 def summaryhook(ui, repo):
--- a/hgext/infinitepush/README	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-## What is it?
-
-This extension adds the ability to save certain pushes to a remote blob store
-as bundles and to serve commits from the remote blob store.
-The revisions are stored on disk or in everstore.
-The metadata are stored in sql or on disk.
-
-## Config options
-
-infinitepush.branchpattern: pattern to detect a scratchbranch, example
-                            're:scratch/.+'
-
-infinitepush.indextype: disk or sql for the metadata
-infinitepush.reponame: only relevant for sql metadata backend, reponame to put in
-                       sql
-
-infinitepush.indexpath: only relevant for ondisk metadata backend, the path to
-                        store the index on disk. If not set, it will be under .hg
-                        in a folder named filebundlestore
-
-infinitepush.storepath: only relevant for ondisk metadata backend, the path to
-                        store the bundles. If not set, it will be
-                        .hg/filebundlestore
--- a/hgext/infinitepush/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1413 +0,0 @@
-# Infinite push
-#
-# Copyright 2016 Facebook, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-""" store some pushes in a remote blob store on the server (EXPERIMENTAL)
-
-IMPORTANT: if you use this extension, please contact
-mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
-be unused and barring learning of users of this functionality, we will
-delete this code at the end of 2020.
-
-    [infinitepush]
-    # Server-side and client-side option. Pattern of the infinitepush bookmark
-    branchpattern = PATTERN
-
-    # Server or client
-    server = False
-
-    # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
-    indextype = disk
-
-    # Server-side option. Used only if indextype=sql.
-    # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
-    sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
-
-    # Server-side option. Used only if indextype=disk.
-    # Filesystem path to the index store
-    indexpath = PATH
-
-    # Server-side option. Possible values: 'disk' or 'external'
-    # Fails if not set
-    storetype = disk
-
-    # Server-side option.
-    # Path to the binary that will save bundle to the bundlestore
-    # Formatted cmd line will be passed to it (see `put_args`)
-    put_binary = put
-
-    # Server-side option. Used only if storetype=external.
-    # Format cmd-line string for put binary. Placeholder: {filename}
-    put_args = {filename}
-
-    # Server-side option.
-    # Path to the binary that gets a bundle from the bundlestore.
-    # Formatted cmd line will be passed to it (see `get_args`)
-    get_binary = get
-
-    # Server-side option. Used only if storetype=external.
-    # Format cmd-line string for get binary. Placeholders: {filename} {handle}
-    get_args = {filename} {handle}
-
-    # Server-side option
-    logfile = FILE
-
-    # Server-side option
-    loglevel = DEBUG
-
-    # Server-side option. Used only if indextype=sql.
-    # Sets mysql wait_timeout option.
-    waittimeout = 300
-
-    # Server-side option. Used only if indextype=sql.
-    # Sets mysql innodb_lock_wait_timeout option.
-    locktimeout = 120
-
-    # Server-side option. Used only if indextype=sql.
-    # Name of the repository
-    reponame = ''
-
-    # Client-side option. Used by --list-remote option. List of remote scratch
-    # patterns to list if no patterns are specified.
-    defaultremotepatterns = ['*']
-
-    # Instructs infinitepush to forward all received bundle2 parts to the
-    # bundle for storage. Defaults to False.
-    storeallparts = True
-
-    # Routes each incoming push to the bundlestore. Defaults to False.
-    pushtobundlestore = True
-
-    [remotenames]
-    # Client-side option
-    # This option should be set only if remotenames extension is enabled.
-    # Whether remote bookmarks are tracked by remotenames extension.
-    bookmarks = True
-"""
-
-
-import collections
-import contextlib
-import functools
-import logging
-import os
-import random
-import re
-import socket
-import subprocess
-import time
-
-from mercurial.node import (
-    bin,
-    hex,
-)
-
-from mercurial.i18n import _
-
-from mercurial.pycompat import (
-    getattr,
-    open,
-)
-
-from mercurial.utils import (
-    procutil,
-    stringutil,
-    urlutil,
-)
-
-from mercurial import (
-    bundle2,
-    changegroup,
-    commands,
-    discovery,
-    encoding,
-    error,
-    exchange,
-    extensions,
-    hg,
-    localrepo,
-    phases,
-    pushkey,
-    pycompat,
-    registrar,
-    util,
-    wireprototypes,
-    wireprotov1peer,
-    wireprotov1server,
-)
-
-from . import (
-    bundleparts,
-    common,
-)
-
-# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
-# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
-# be specifying the version(s) of Mercurial they are tested with, or
-# leave the attribute unspecified.
-testedwith = b'ships-with-hg-core'
-
-configtable = {}
-configitem = registrar.configitem(configtable)
-
-configitem(
-    b'infinitepush',
-    b'deprecation-message',
-    default=True,
-)
-
-configitem(
-    b'infinitepush',
-    b'deprecation-abort',
-    default=True,
-)
-
-configitem(
-    b'infinitepush',
-    b'server',
-    default=False,
-)
-configitem(
-    b'infinitepush',
-    b'storetype',
-    default=b'',
-)
-configitem(
-    b'infinitepush',
-    b'indextype',
-    default=b'',
-)
-configitem(
-    b'infinitepush',
-    b'indexpath',
-    default=b'',
-)
-configitem(
-    b'infinitepush',
-    b'storeallparts',
-    default=False,
-)
-configitem(
-    b'infinitepush',
-    b'reponame',
-    default=b'',
-)
-configitem(
-    b'scratchbranch',
-    b'storepath',
-    default=b'',
-)
-configitem(
-    b'infinitepush',
-    b'branchpattern',
-    default=b'',
-)
-configitem(
-    b'infinitepush',
-    b'pushtobundlestore',
-    default=False,
-)
-configitem(
-    b'experimental',
-    b'server-bundlestore-bookmark',
-    default=b'',
-)
-configitem(
-    b'experimental',
-    b'infinitepush-scratchpush',
-    default=False,
-)
-
-experimental = b'experimental'
-configbookmark = b'server-bundlestore-bookmark'
-configscratchpush = b'infinitepush-scratchpush'
-
-scratchbranchparttype = bundleparts.scratchbranchparttype
-revsetpredicate = registrar.revsetpredicate()
-templatekeyword = registrar.templatekeyword()
-_scratchbranchmatcher = lambda x: False
-_maybehash = re.compile('^[a-f0-9]+$').search
-
-
-def _buildexternalbundlestore(ui):
-    put_args = ui.configlist(b'infinitepush', b'put_args', [])
-    put_binary = ui.config(b'infinitepush', b'put_binary')
-    if not put_binary:
-        raise error.Abort(b'put binary is not specified')
-    get_args = ui.configlist(b'infinitepush', b'get_args', [])
-    get_binary = ui.config(b'infinitepush', b'get_binary')
-    if not get_binary:
-        raise error.Abort(b'get binary is not specified')
-    from . import store
-
-    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
-
-
-def _buildsqlindex(ui):
-    sqlhost = ui.config(b'infinitepush', b'sqlhost')
-    if not sqlhost:
-        raise error.Abort(_(b'please set infinitepush.sqlhost'))
-    host, port, db, user, password = sqlhost.split(b':')
-    reponame = ui.config(b'infinitepush', b'reponame')
-    if not reponame:
-        raise error.Abort(_(b'please set infinitepush.reponame'))
-
-    logfile = ui.config(b'infinitepush', b'logfile', b'')
-    waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
-    locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
-    from . import sqlindexapi
-
-    return sqlindexapi.sqlindexapi(
-        reponame,
-        host,
-        port,
-        db,
-        user,
-        password,
-        logfile,
-        _getloglevel(ui),
-        waittimeout=waittimeout,
-        locktimeout=locktimeout,
-    )
-
-
-def _getloglevel(ui):
-    loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
-    numeric_loglevel = getattr(logging, loglevel.upper(), None)
-    if not isinstance(numeric_loglevel, int):
-        raise error.Abort(_(b'invalid log level %s') % loglevel)
-    return numeric_loglevel
-
-
-def _tryhoist(ui, remotebookmark):
-    """returns a bookmarks with hoisted part removed
-
-    The remotenames extension has a 'hoist' config that allows using remote
-    bookmarks without specifying the remote path. For example, 'hg update master'
-    works as well as 'hg update remote/master'. We want to allow the same in
-    infinitepush.
-    """
-
-    if common.isremotebooksenabled(ui):
-        hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
-        if remotebookmark.startswith(hoist):
-            return remotebookmark[len(hoist) :]
-    return remotebookmark
-
-
-class bundlestore:
-    def __init__(self, repo):
-        self._repo = repo
-        storetype = self._repo.ui.config(b'infinitepush', b'storetype')
-        if storetype == b'disk':
-            from . import store
-
-            self.store = store.filebundlestore(self._repo.ui, self._repo)
-        elif storetype == b'external':
-            self.store = _buildexternalbundlestore(self._repo.ui)
-        else:
-            raise error.Abort(
-                _(b'unknown infinitepush store type specified %s') % storetype
-            )
-
-        indextype = self._repo.ui.config(b'infinitepush', b'indextype')
-        if indextype == b'disk':
-            from . import fileindexapi
-
-            self.index = fileindexapi.fileindexapi(self._repo)
-        elif indextype == b'sql':
-            self.index = _buildsqlindex(self._repo.ui)
-        else:
-            raise error.Abort(
-                _(b'unknown infinitepush index type specified %s') % indextype
-            )
-
-
-def _isserver(ui):
-    return ui.configbool(b'infinitepush', b'server')
-
-
-WARNING_MSG = b"""IMPORTANT: if you use this extension, please contact
-mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-unused and, barring learning of users of this functionality, we will drop this
-extension in Mercurial 6.6.
-"""
-
-
-def reposetup(ui, repo):
-    if ui.configbool(b'infinitepush', b'deprecation-message'):
-        ui.write_err(WARNING_MSG)
-    if ui.configbool(b'infinitepush', b'deprecation-abort'):
-        msg = b"USING EXTENSION INFINITE PUSH DESPITE PENDING DROP"
-        hint = b"contact mercurial-devel@mercurial-scm.org"
-        raise error.Abort(msg, hint=hint)
-    if _isserver(ui) and repo.local():
-        repo.bundlestore = bundlestore(repo)
-
-
-def extsetup(ui):
-    commonsetup(ui)
-    if _isserver(ui):
-        serverextsetup(ui)
-    else:
-        clientextsetup(ui)
-
-
-def uipopulate(ui):
-    if not ui.hasconfig(b"experimental", b"changegroup3"):
-        ui.setconfig(b"experimental", b"changegroup3", False, b"infinitepush")
-
-
-def commonsetup(ui):
-    wireprotov1server.commands[b'listkeyspatterns'] = (
-        wireprotolistkeyspatterns,
-        b'namespace patterns',
-    )
-    scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
-    if scratchbranchpat:
-        global _scratchbranchmatcher
-        kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
-            scratchbranchpat
-        )
-
-
-def serverextsetup(ui):
-    origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
-
-    def newpushkeyhandler(*args, **kwargs):
-        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
-
-    newpushkeyhandler.params = origpushkeyhandler.params
-    bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
-
-    orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
-    newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
-        orighandlephasehandler, *args, **kwargs
-    )
-    newphaseheadshandler.params = orighandlephasehandler.params
-    bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
-
-    extensions.wrapfunction(
-        localrepo.localrepository, b'listkeys', localrepolistkeys
-    )
-    wireprotov1server.commands[b'lookup'] = (
-        _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
-        b'key',
-    )
-    extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
-
-    extensions.wrapfunction(bundle2, b'processparts', processparts)
-
-
-def clientextsetup(ui):
-    entry = extensions.wrapcommand(commands.table, b'push', _push)
-
-    entry[1].append(
-        (
-            b'',
-            b'bundle-store',
-            None,
-            _(b'force push to go to bundle store (EXPERIMENTAL)'),
-        )
-    )
-
-    extensions.wrapcommand(commands.table, b'pull', _pull)
-
-    extensions.wrapfunction(discovery, b'checkheads', _checkheads)
-
-    wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
-
-    partorder = exchange.b2partsgenorder
-    index = partorder.index(b'changeset')
-    partorder.insert(
-        index, partorder.pop(partorder.index(scratchbranchparttype))
-    )
-
-
-def _checkheads(orig, pushop):
-    if pushop.ui.configbool(experimental, configscratchpush, False):
-        return
-    return orig(pushop)
-
-
-def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
-    patterns = wireprototypes.decodelist(patterns)
-    d = repo.listkeys(encoding.tolocal(namespace), patterns).items()
-    return pushkey.encodekeys(d)
-
-
-def localrepolistkeys(orig, self, namespace, patterns=None):
-    if namespace == b'bookmarks' and patterns:
-        index = self.bundlestore.index
-        results = {}
-        bookmarks = orig(self, namespace)
-        for pattern in patterns:
-            results.update(index.getbookmarks(pattern))
-            if pattern.endswith(b'*'):
-                pattern = b're:^' + pattern[:-1] + b'.*'
-            kind, pat, matcher = stringutil.stringmatcher(pattern)
-            for bookmark, node in bookmarks.items():
-                if matcher(bookmark):
-                    results[bookmark] = node
-        return results
-    else:
-        return orig(self, namespace)
-
-
-@wireprotov1peer.batchable
-def listkeyspatterns(self, namespace, patterns):
-    if not self.capable(b'pushkey'):
-        return {}, None
-    self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
-
-    def decode(d):
-        self.ui.debug(
-            b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
-        )
-        return pushkey.decodekeys(d)
-
-    return {
-        b'namespace': encoding.fromlocal(namespace),
-        b'patterns': wireprototypes.encodelist(patterns),
-    }, decode
-
-
-def _readbundlerevs(bundlerepo):
-    return list(bundlerepo.revs(b'bundle()'))
-
-
-def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
-    """Tells remotefilelog to include all changed files to the changegroup
-
-    By default remotefilelog doesn't include file content in the changegroup.
-    But we need to include it when fetching from the bundlestore.
-    """
-    changedfiles = set()
-    cl = bundlerepo.changelog
-    for r in bundlerevs:
-        # [3] means changed files
-        changedfiles.update(cl.read(r)[3])
-    if not changedfiles:
-        return bundlecaps
-
-    changedfiles = b'\0'.join(changedfiles)
-    newcaps = []
-    appended = False
-    for cap in bundlecaps or []:
-        if cap.startswith(b'excludepattern='):
-            newcaps.append(b'\0'.join((cap, changedfiles)))
-            appended = True
-        else:
-            newcaps.append(cap)
-    if not appended:
-        # No excludepattern cap was found; just append it
-        newcaps.append(b'excludepattern=' + changedfiles)
-
-    return newcaps
-
-
-def _rebundle(bundlerepo, bundleroots, unknownhead):
-    """
-    A bundle may include more revisions than the user requested: e.g. the
-    user asks for one revision, but the bundle also contains its descendants.
-    This function filters out all revisions that the user did not request.
-    """
-    parts = []
-
-    version = b'02'
-    outgoing = discovery.outgoing(
-        bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
-    )
-    cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
-    cgstream = util.chunkbuffer(cgstream).read()
-    cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
-    cgpart.addparam(b'version', version)
-    parts.append(cgpart)
-
-    return parts
-
-
-def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
-    cl = bundlerepo.changelog
-    bundleroots = []
-    for rev in bundlerevs:
-        node = cl.node(rev)
-        parents = cl.parents(node)
-        for parent in parents:
-            # include all revs that exist in the main repo
-            # to make sure that bundle may apply client-side
-            if parent in oldrepo:
-                bundleroots.append(parent)
-    return bundleroots
-
-
-def _needsrebundling(head, bundlerepo):
-    bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
-    return not (
-        len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
-    )
-
-
-def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
-    """generates bundle that will be send to the user
-
-    returns tuple with raw bundle string and bundle type
-    """
-    parts = []
-    if not _needsrebundling(head, bundlerepo):
-        with util.posixfile(bundlefile, b"rb") as f:
-            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
-            if isinstance(unbundler, changegroup.cg1unpacker):
-                part = bundle2.bundlepart(
-                    b'changegroup', data=unbundler._stream.read()
-                )
-                part.addparam(b'version', b'01')
-                parts.append(part)
-            elif isinstance(unbundler, bundle2.unbundle20):
-                haschangegroup = False
-                for part in unbundler.iterparts():
-                    if part.type == b'changegroup':
-                        haschangegroup = True
-                    newpart = bundle2.bundlepart(part.type, data=part.read())
-                    for key, value in part.params.items():
-                        newpart.addparam(key, value)
-                    parts.append(newpart)
-
-                if not haschangegroup:
-                    raise error.Abort(
-                        b'unexpected bundle without changegroup part, '
-                        + b'head: %s' % hex(head),
-                        hint=b'report to administrator',
-                    )
-            else:
-                raise error.Abort(b'unknown bundle type')
-    else:
-        parts = _rebundle(bundlerepo, bundleroots, head)
-
-    return parts
-
-
-def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
-    heads = heads or []
-    # newheads are parents of roots of scratch bundles that were requested
-    newphases = {}
-    scratchbundles = []
-    newheads = []
-    scratchheads = []
-    nodestobundle = {}
-    allbundlestocleanup = []
-    try:
-        for head in heads:
-            if not repo.changelog.index.has_node(head):
-                if head not in nodestobundle:
-                    newbundlefile = common.downloadbundle(repo, head)
-                    bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
-                    bundlerepo = hg.repository(repo.ui, bundlepath)
-
-                    allbundlestocleanup.append((bundlerepo, newbundlefile))
-                    bundlerevs = set(_readbundlerevs(bundlerepo))
-                    bundlecaps = _includefilelogstobundle(
-                        bundlecaps, bundlerepo, bundlerevs, repo.ui
-                    )
-                    cl = bundlerepo.changelog
-                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
-                    for rev in bundlerevs:
-                        node = cl.node(rev)
-                        newphases[hex(node)] = str(phases.draft)
-                        nodestobundle[node] = (
-                            bundlerepo,
-                            bundleroots,
-                            newbundlefile,
-                        )
-
-                scratchbundles.append(
-                    _generateoutputparts(head, *nodestobundle[head])
-                )
-                newheads.extend(bundleroots)
-                scratchheads.append(head)
-    finally:
-        for bundlerepo, bundlefile in allbundlestocleanup:
-            bundlerepo.close()
-            try:
-                os.unlink(bundlefile)
-            except (IOError, OSError):
-                # if we can't cleanup the file then just ignore the error,
-                # no need to fail
-                pass
-
-    pullfrombundlestore = bool(scratchbundles)
-    wrappedchangegrouppart = False
-    wrappedlistkeys = False
-    oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
-    try:
-
-        def _changegrouppart(bundler, *args, **kwargs):
-            # Order is important here. First add non-scratch part
-            # and only then add parts with scratch bundles because
-            # non-scratch part contains parents of roots of scratch bundles.
-            result = oldchangegrouppart(bundler, *args, **kwargs)
-            for bundle in scratchbundles:
-                for part in bundle:
-                    bundler.addpart(part)
-            return result
-
-        exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
-        wrappedchangegrouppart = True
-
-        def _listkeys(orig, self, namespace):
-            origvalues = orig(self, namespace)
-            if namespace == b'phases' and pullfrombundlestore:
-                if origvalues.get(b'publishing') == b'True':
-                    # Make repo non-publishing to preserve draft phase
-                    del origvalues[b'publishing']
-                origvalues.update(newphases)
-            return origvalues
-
-        extensions.wrapfunction(
-            localrepo.localrepository, b'listkeys', _listkeys
-        )
-        wrappedlistkeys = True
-        heads = list((set(newheads) | set(heads)) - set(scratchheads))
-        result = orig(
-            repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
-        )
-    finally:
-        if wrappedchangegrouppart:
-            exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
-        if wrappedlistkeys:
-            extensions.unwrapfunction(
-                localrepo.localrepository, b'listkeys', _listkeys
-            )
-    return result
-
-
-def _lookupwrap(orig):
-    def _lookup(repo, proto, key):
-        localkey = encoding.tolocal(key)
-
-        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
-            scratchnode = repo.bundlestore.index.getnode(localkey)
-            if scratchnode:
-                return b"%d %s\n" % (1, scratchnode)
-            else:
-                return b"%d %s\n" % (
-                    0,
-                    b'scratch branch %s not found' % localkey,
-                )
-        else:
-            try:
-                r = hex(repo.lookup(localkey))
-                return b"%d %s\n" % (1, r)
-            except Exception as inst:
-                if repo.bundlestore.index.getbundle(localkey):
-                    return b"%d %s\n" % (1, localkey)
-                else:
-                    r = stringutil.forcebytestr(inst)
-                    return b"%d %s\n" % (0, r)
-
-    return _lookup
-
-
-def _pull(orig, ui, repo, source=b"default", **opts):
-    opts = pycompat.byteskwargs(opts)
-    # Copy paste from `pull` command
-    path = urlutil.get_unique_pull_path_obj(
-        b"infinite-push's pull",
-        ui,
-        source,
-    )
-
-    scratchbookmarks = {}
-    unfi = repo.unfiltered()
-    unknownnodes = []
-    for rev in opts.get(b'rev', []):
-        if rev not in unfi:
-            unknownnodes.append(rev)
-    if opts.get(b'bookmark'):
-        bookmarks = []
-        revs = opts.get(b'rev') or []
-        for bookmark in opts.get(b'bookmark'):
-            if _scratchbranchmatcher(bookmark):
-                # rev is not known yet
-                # it will be fetched with listkeyspatterns next
-                scratchbookmarks[bookmark] = b'REVTOFETCH'
-            else:
-                bookmarks.append(bookmark)
-
-        if scratchbookmarks:
-            other = hg.peer(repo, opts, path)
-            try:
-                fetchedbookmarks = other.listkeyspatterns(
-                    b'bookmarks', patterns=scratchbookmarks
-                )
-                for bookmark in scratchbookmarks:
-                    if bookmark not in fetchedbookmarks:
-                        raise error.Abort(
-                            b'remote bookmark %s not found!' % bookmark
-                        )
-                    scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
-                    revs.append(fetchedbookmarks[bookmark])
-            finally:
-                other.close()
-        opts[b'bookmark'] = bookmarks
-        opts[b'rev'] = revs
-
-    if scratchbookmarks or unknownnodes:
-        # Set anyincoming to True
-        extensions.wrapfunction(
-            discovery, b'findcommonincoming', _findcommonincoming
-        )
-    try:
-        # Remote scratch bookmarks will be deleted because remotenames doesn't
-        # know about them. Let's save it before pull and restore after
-        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, path.loc)
-        result = orig(ui, repo, path.loc, **pycompat.strkwargs(opts))
-        # TODO(stash): a race condition is possible
-        # if scratch bookmarks were updated right after orig.
-        # But that's unlikely and shouldn't be harmful.
-        if common.isremotebooksenabled(ui):
-            remotescratchbookmarks.update(scratchbookmarks)
-            _saveremotebookmarks(repo, remotescratchbookmarks, path.loc)
-        else:
-            _savelocalbookmarks(repo, scratchbookmarks)
-        return result
-    finally:
-        if scratchbookmarks:
-            extensions.unwrapfunction(discovery, b'findcommonincoming')
-
-
-def _readscratchremotebookmarks(ui, repo, other):
-    if common.isremotebooksenabled(ui):
-        remotenamesext = extensions.find(b'remotenames')
-        remotepath = remotenamesext.activepath(repo.ui, other)
-        result = {}
-        # Let's refresh remotenames to make sure they are up to date.
-        # It seems that `repo.names['remotebookmarks']` may return stale
-        # bookmarks, which results in deleting scratch bookmarks. Our best
-        # guess at how to fix it is to use `clearnames()`.
-        repo._remotenames.clearnames()
-        for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
-            path, bookname = remotenamesext.splitremotename(remotebookmark)
-            if path == remotepath and _scratchbranchmatcher(bookname):
-                nodes = repo.names[b'remotebookmarks'].nodes(
-                    repo, remotebookmark
-                )
-                if nodes:
-                    result[bookname] = hex(nodes[0])
-        return result
-    else:
-        return {}
-
-
-def _saveremotebookmarks(repo, newbookmarks, remote):
-    remotenamesext = extensions.find(b'remotenames')
-    remotepath = remotenamesext.activepath(repo.ui, remote)
-    branches = collections.defaultdict(list)
-    bookmarks = {}
-    remotenames = remotenamesext.readremotenames(repo)
-    for hexnode, nametype, remote, rname in remotenames:
-        if remote != remotepath:
-            continue
-        if nametype == b'bookmarks':
-            if rname in newbookmarks:
-                # It's possible if we have a normal bookmark that matches
-                # scratch branch pattern. In this case just use the current
-                # bookmark node
-                del newbookmarks[rname]
-            bookmarks[rname] = hexnode
-        elif nametype == b'branches':
-            # saveremotenames expects 20 byte binary nodes for branches
-            branches[rname].append(bin(hexnode))
-
-    for bookmark, hexnode in newbookmarks.items():
-        bookmarks[bookmark] = hexnode
-    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
-
-
-def _savelocalbookmarks(repo, bookmarks):
-    if not bookmarks:
-        return
-    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
-        changes = []
-        for scratchbook, node in bookmarks.items():
-            changectx = repo[node]
-            changes.append((scratchbook, changectx.node()))
-        repo._bookmarks.applychanges(repo, tr, changes)
-
-
-def _findcommonincoming(orig, *args, **kwargs):
-    common, inc, remoteheads = orig(*args, **kwargs)
-    return common, True, remoteheads
-
-
-def _push(orig, ui, repo, *dests, **opts):
-    opts = pycompat.byteskwargs(opts)
-    bookmark = opts.get(b'bookmark')
-    # we only support pushing one infinitepush bookmark at once
-    if len(bookmark) == 1:
-        bookmark = bookmark[0]
-    else:
-        bookmark = b''
-
-    oldphasemove = None
-    overrides = {(experimental, configbookmark): bookmark}
-
-    with ui.configoverride(overrides, b'infinitepush'):
-        scratchpush = opts.get(b'bundle_store')
-        if _scratchbranchmatcher(bookmark):
-            scratchpush = True
-            # bundle2 can be sent back after push (for example, bundle2
-            # containing `pushkey` part to update bookmarks)
-            ui.setconfig(experimental, b'bundle2.pushback', True)
-
-        if scratchpush:
-            # this is an infinitepush; we don't want the bookmark to be
-            # applied, but rather stored in the bundlestore
-            opts[b'bookmark'] = []
-            ui.setconfig(experimental, configscratchpush, True)
-            oldphasemove = extensions.wrapfunction(
-                exchange, b'_localphasemove', _phasemove
-            )
-
-        paths = list(urlutil.get_push_paths(repo, ui, dests))
-        if len(paths) > 1:
-            msg = _(b'cannot push to multiple paths with infinitepush')
-            raise error.Abort(msg)
-
-        path = paths[0]
-        destpath = path.loc
-        # Remote scratch bookmarks will be deleted because remotenames doesn't
-        # know about them. Let's save it before push and restore after
-        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
-        result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
-        if common.isremotebooksenabled(ui):
-            if bookmark and scratchpush:
-                other = hg.peer(repo, opts, path)
-                try:
-                    fetchedbookmarks = other.listkeyspatterns(
-                        b'bookmarks', patterns=[bookmark]
-                    )
-                    remotescratchbookmarks.update(fetchedbookmarks)
-                finally:
-                    other.close()
-            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
-    if oldphasemove:
-        exchange._localphasemove = oldphasemove
-    return result
-
-
-def _deleteinfinitepushbookmarks(ui, repo, path, names):
-    """Prune remote names by removing the bookmarks we don't want anymore,
-    then writing the result back to disk
-    """
-    remotenamesext = extensions.find(b'remotenames')
-
-    # remotename format is:
-    # (node, nametype ("branches" or "bookmarks"), remote, name)
-    nametype_idx = 1
-    remote_idx = 2
-    name_idx = 3
-    remotenames = [
-        remotename
-        for remotename in remotenamesext.readremotenames(repo)
-        if remotename[remote_idx] == path
-    ]
-    remote_bm_names = [
-        remotename[name_idx]
-        for remotename in remotenames
-        if remotename[nametype_idx] == b"bookmarks"
-    ]
-
-    for name in names:
-        if name not in remote_bm_names:
-            raise error.Abort(
-                _(
-                    b"infinitepush bookmark '{}' does not exist "
-                    b"in path '{}'"
-                ).format(name, path)
-            )
-
-    bookmarks = {}
-    branches = collections.defaultdict(list)
-    for node, nametype, remote, name in remotenames:
-        if nametype == b"bookmarks" and name not in names:
-            bookmarks[name] = node
-        elif nametype == b"branches":
-            # saveremotenames wants binary nodes for branches
-            branches[name].append(bin(node))
-
-    remotenamesext.saveremotenames(repo, path, branches, bookmarks)
-
-
-def _phasemove(orig, pushop, nodes, phase=phases.public):
-    """prevent commits from being marked public
-
-    Since these are going to a scratch branch, they aren't really being
-    published."""
-
-    if phase != phases.public:
-        orig(pushop, nodes, phase)
-
-
-@exchange.b2partsgenerator(scratchbranchparttype)
-def partgen(pushop, bundler):
-    bookmark = pushop.ui.config(experimental, configbookmark)
-    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
-    if b'changesets' in pushop.stepsdone or not scratchpush:
-        return
-
-    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
-        return
-
-    pushop.stepsdone.add(b'changesets')
-    if not pushop.outgoing.missing:
-        pushop.ui.status(_(b'no changes found\n'))
-        pushop.cgresult = 0
-        return
-
-    # This parameter tells the server that the following bundle is an
-    # infinitepush. This lets it switch the part processing to our
-    # infinitepush code path.
-    bundler.addparam(b"infinitepush", b"True")
-
-    scratchparts = bundleparts.getscratchbranchparts(
-        pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
-    )
-
-    for scratchpart in scratchparts:
-        bundler.addpart(scratchpart)
-
-    def handlereply(op):
-        # server either succeeds or aborts; no code to read
-        pushop.cgresult = 1
-
-    return handlereply
-
-
-bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
-
-
-def _getrevs(bundle, oldnode, force, bookmark):
-    b'extracts and validates the revs to be imported'
-    revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]
-
-    # new bookmark
-    if oldnode is None:
-        return revs
-
-    # Fast forward update
-    if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
-        return revs
-
-    return revs
-
-
-@contextlib.contextmanager
-def logservicecall(logger, service, **kwargs):
-    start = time.time()
-    logger(service, eventtype=b'start', **kwargs)
-    try:
-        yield
-        logger(
-            service,
-            eventtype=b'success',
-            elapsedms=(time.time() - start) * 1000,
-            **kwargs
-        )
-    except Exception as e:
-        logger(
-            service,
-            eventtype=b'failure',
-            elapsedms=(time.time() - start) * 1000,
-            errormsg=stringutil.forcebytestr(e),
-            **kwargs
-        )
-        raise
-
-
-def _getorcreateinfinitepushlogger(op):
-    logger = op.records[b'infinitepushlogger']
-    if not logger:
-        ui = op.repo.ui
-        try:
-            username = procutil.getuser()
-        except Exception:
-            username = b'unknown'
-        # Generate a random request id to be able to find all logged entries
-        # for the same request. Since requestid is pseudo-generated, it may
-        # not be unique, but we assume that (hostname, username, requestid)
-        # is unique.
-        random.seed()
-        requestid = random.randint(0, 2000000000)
-        hostname = socket.gethostname()
-        logger = functools.partial(
-            ui.log,
-            b'infinitepush',
-            user=username,
-            requestid=requestid,
-            hostname=hostname,
-            reponame=ui.config(b'infinitepush', b'reponame'),
-        )
-        op.records.add(b'infinitepushlogger', logger)
-    else:
-        logger = logger[0]
-    return logger
-
-
-def storetobundlestore(orig, repo, op, unbundler):
-    """stores the incoming bundle coming from push command to the bundlestore
-    instead of applying on the revlogs"""
-
-    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
-    bundler = bundle2.bundle20(repo.ui)
-
-    # processing each part and storing it in bundler
-    with bundle2.partiterator(repo, op, unbundler) as parts:
-        for part in parts:
-            bundlepart = None
-            if part.type == b'replycaps':
-                # This configures the current operation to allow reply parts.
-                bundle2._processpart(op, part)
-            else:
-                bundlepart = bundle2.bundlepart(part.type, data=part.read())
-                for key, value in part.params.items():
-                    bundlepart.addparam(key, value)
-
-                # Certain parts require a response
-                if part.type in (b'pushkey', b'changegroup'):
-                    if op.reply is not None:
-                        rpart = op.reply.newpart(b'reply:%s' % part.type)
-                        rpart.addparam(
-                            b'in-reply-to', b'%d' % part.id, mandatory=False
-                        )
-                        rpart.addparam(b'return', b'1', mandatory=False)
-
-            op.records.add(
-                part.type,
-                {
-                    b'return': 1,
-                },
-            )
-            if bundlepart:
-                bundler.addpart(bundlepart)
-
-    # storing the bundle in the bundlestore
-    buf = util.chunkbuffer(bundler.getchunks())
-    fd, bundlefile = pycompat.mkstemp()
-    try:
-        try:
-            fp = os.fdopen(fd, 'wb')
-            fp.write(buf.read())
-        finally:
-            fp.close()
-        storebundle(op, {}, bundlefile)
-    finally:
-        try:
-            os.unlink(bundlefile)
-        except Exception:
-            # we would rather see the original exception
-            pass
-
-
-def processparts(orig, repo, op, unbundler):
-
-    # make sure we don't wrap processparts in case of `hg unbundle`
-    if op.source == b'unbundle':
-        return orig(repo, op, unbundler)
-
-    # this server routes each push to the bundle store
-    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
-        return storetobundlestore(orig, repo, op, unbundler)
-
-    if unbundler.params.get(b'infinitepush') != b'True':
-        return orig(repo, op, unbundler)
-
-    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')
-
-    bundler = bundle2.bundle20(repo.ui)
-    cgparams = None
-    with bundle2.partiterator(repo, op, unbundler) as parts:
-        for part in parts:
-            bundlepart = None
-            if part.type == b'replycaps':
-                # This configures the current operation to allow reply parts.
-                bundle2._processpart(op, part)
-            elif part.type == bundleparts.scratchbranchparttype:
-                # Scratch branch parts need to be converted to normal
-                # changegroup parts, and the extra parameters stored for later
-                # when we upload to the store. Eventually those parameters will
-                # be put on the actual bundle instead of this part, then we can
-                # send a vanilla changegroup instead of the scratchbranch part.
-                cgversion = part.params.get(b'cgversion', b'01')
-                bundlepart = bundle2.bundlepart(
-                    b'changegroup', data=part.read()
-                )
-                bundlepart.addparam(b'version', cgversion)
-                cgparams = part.params
-
-                # If we're not dumping all parts into the new bundle, we need to
-                # alert the future pushkey and phase-heads handler to skip
-                # the part.
-                if not handleallparts:
-                    op.records.add(
-                        scratchbranchparttype + b'_skippushkey', True
-                    )
-                    op.records.add(
-                        scratchbranchparttype + b'_skipphaseheads', True
-                    )
-            else:
-                if handleallparts:
-                    # Ideally we would not process any parts, and instead just
-                    # forward them to the bundle for storage, but since this
-                    # differs from previous behavior, we need to put it behind a
-                    # config flag for incremental rollout.
-                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
-                    for key, value in part.params.items():
-                        bundlepart.addparam(key, value)
-
-                    # Certain parts require a response
-                    if part.type == b'pushkey':
-                        if op.reply is not None:
-                            rpart = op.reply.newpart(b'reply:pushkey')
-                            rpart.addparam(
-                                b'in-reply-to', str(part.id), mandatory=False
-                            )
-                            rpart.addparam(b'return', b'1', mandatory=False)
-                else:
-                    bundle2._processpart(op, part)
-
-            if handleallparts:
-                op.records.add(
-                    part.type,
-                    {
-                        b'return': 1,
-                    },
-                )
-            if bundlepart:
-                bundler.addpart(bundlepart)
-
-    # If commits were sent, store them
-    if cgparams:
-        buf = util.chunkbuffer(bundler.getchunks())
-        fd, bundlefile = pycompat.mkstemp()
-        try:
-            try:
-                fp = os.fdopen(fd, 'wb')
-                fp.write(buf.read())
-            finally:
-                fp.close()
-            storebundle(op, cgparams, bundlefile)
-        finally:
-            try:
-                os.unlink(bundlefile)
-            except Exception:
-                # we would rather see the original exception
-                pass
-
-
-def storebundle(op, params, bundlefile):
-    log = _getorcreateinfinitepushlogger(op)
-    parthandlerstart = time.time()
-    log(scratchbranchparttype, eventtype=b'start')
-    index = op.repo.bundlestore.index
-    store = op.repo.bundlestore.store
-    op.records.add(scratchbranchparttype + b'_skippushkey', True)
-
-    bundle = None
-    try:  # guards bundle
-        bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
-        bundle = hg.repository(op.repo.ui, bundlepath)
-
-        bookmark = params.get(b'bookmark')
-        bookprevnode = params.get(b'bookprevnode', b'')
-        force = params.get(b'force')
-
-        if bookmark:
-            oldnode = index.getnode(bookmark)
-        else:
-            oldnode = None
-        bundleheads = bundle.revs(b'heads(bundle())')
-        if bookmark and len(bundleheads) > 1:
-            raise error.Abort(
-                _(b'cannot push more than one head to a scratch branch')
-            )
-
-        revs = _getrevs(bundle, oldnode, force, bookmark)
-
-        # Notify the user of what is being pushed
-        plural = b's' if len(revs) > 1 else b''
-        op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
-        maxoutput = 10
-        for i in range(0, min(len(revs), maxoutput)):
-            firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
-            op.repo.ui.warn(b"    %s  %s\n" % (revs[i], firstline))
-
-        if len(revs) > maxoutput + 1:
-            op.repo.ui.warn(b"    ...\n")
-            firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
-            op.repo.ui.warn(b"    %s  %s\n" % (revs[-1], firstline))
-
-        nodesctx = [bundle[rev] for rev in revs]
-        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
-        if bundleheads:
-            newheadscount = sum(not inindex(rev) for rev in bundleheads)
-        else:
-            newheadscount = 0
-        # If there's a bookmark specified, there should be only one head,
-        # so we choose the last node, which will be that head.
-        # If a bug or malicious client allows there to be a bookmark
-        # with multiple heads, we will place the bookmark on the last head.
-        bookmarknode = nodesctx[-1].hex() if nodesctx else None
-        key = None
-        if newheadscount:
-            with open(bundlefile, b'rb') as f:
-                bundledata = f.read()
-                with logservicecall(
-                    log, b'bundlestore', bundlesize=len(bundledata)
-                ):
-                    bundlesizelimit = 100 * 1024 * 1024  # 100 MB
-                    if len(bundledata) > bundlesizelimit:
-                        error_msg = (
-                            b'bundle is too big: %d bytes. '
-                            + b'max allowed size is 100 MB'
-                        )
-                        raise error.Abort(error_msg % (len(bundledata),))
-                    key = store.write(bundledata)
-
-        with logservicecall(log, b'index', newheadscount=newheadscount), index:
-            if key:
-                index.addbundle(key, nodesctx)
-            if bookmark:
-                index.addbookmark(bookmark, bookmarknode)
-                _maybeaddpushbackpart(
-                    op, bookmark, bookmarknode, bookprevnode, params
-                )
-        log(
-            scratchbranchparttype,
-            eventtype=b'success',
-            elapsedms=(time.time() - parthandlerstart) * 1000,
-        )
-
-    except Exception as e:
-        log(
-            scratchbranchparttype,
-            eventtype=b'failure',
-            elapsedms=(time.time() - parthandlerstart) * 1000,
-            errormsg=stringutil.forcebytestr(e),
-        )
-        raise
-    finally:
-        if bundle:
-            bundle.close()
-
-
-@bundle2.parthandler(
-    scratchbranchparttype,
-    (
-        b'bookmark',
-        b'bookprevnode',
-        b'force',
-        b'pushbackbookmarks',
-        b'cgversion',
-    ),
-)
-def bundle2scratchbranch(op, part):
-    '''unbundle a bundle2 part containing a changegroup to store'''
-
-    bundler = bundle2.bundle20(op.repo.ui)
-    cgversion = part.params.get(b'cgversion', b'01')
-    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
-    cgpart.addparam(b'version', cgversion)
-    bundler.addpart(cgpart)
-    buf = util.chunkbuffer(bundler.getchunks())
-
-    fd, bundlefile = pycompat.mkstemp()
-    try:
-        try:
-            fp = os.fdopen(fd, 'wb')
-            fp.write(buf.read())
-        finally:
-            fp.close()
-        storebundle(op, part.params, bundlefile)
-    finally:
-        try:
-            os.unlink(bundlefile)
-        except FileNotFoundError:
-            pass
-
-    return 1
-
-
-def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
-    if params.get(b'pushbackbookmarks'):
-        if op.reply and b'pushback' in op.reply.capabilities:
-            params = {
-                b'namespace': b'bookmarks',
-                b'key': bookmark,
-                b'new': newnode,
-                b'old': oldnode,
-            }
-            op.reply.newpart(b'pushkey', mandatoryparams=params.items())
-
-
-def bundle2pushkey(orig, op, part):
-    """Wrapper of bundle2.handlepushkey()
-
-    The only goal is to skip calling the original function if the flag is
-    set. The flag is set when an infinitepush push is happening.
-    """
-    if op.records[scratchbranchparttype + b'_skippushkey']:
-        if op.reply is not None:
-            rpart = op.reply.newpart(b'reply:pushkey')
-            rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
-            rpart.addparam(b'return', b'1', mandatory=False)
-        return 1
-
-    return orig(op, part)
-
-
-def bundle2handlephases(orig, op, part):
-    """Wrapper of bundle2.handlephases()
-
-    The only goal is to skip calling the original function if the flag is
-    set. The flag is set when an infinitepush push is happening.
-    """
-
-    if op.records[scratchbranchparttype + b'_skipphaseheads']:
-        return
-
-    return orig(op, part)
-
-
-def _asyncsavemetadata(root, nodes):
-    """starts a separate process that fills metadata for the nodes
-
-    This function creates a separate process and doesn't wait for its
-    completion. This was done to avoid slowing down pushes.
-    """
-
-    maxnodes = 50
-    if len(nodes) > maxnodes:
-        return
-    nodesargs = []
-    for node in nodes:
-        nodesargs.append(b'--node')
-        nodesargs.append(node)
-    with open(os.devnull, b'w+b') as devnull:
-        cmdline = [
-            util.hgexecutable(),
-            b'debugfillinfinitepushmetadata',
-            b'-R',
-            root,
-        ] + nodesargs
-        # Process will run in background. We don't care about the return code
-        subprocess.Popen(
-            pycompat.rapply(procutil.tonativestr, cmdline),
-            close_fds=True,
-            shell=False,
-            stdin=devnull,
-            stdout=devnull,
-            stderr=devnull,
-        )
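
The logservicecall context manager above brackets a service call with paired
start and success-or-failure log events, attaching the elapsed time in
milliseconds. A minimal standalone sketch of the same pattern, with a
hypothetical print-based logger standing in for ui.log():

    import contextlib
    import time

    @contextlib.contextmanager
    def timedcall(logger, service, **kwargs):
        # one 'start' event, then 'success' or 'failure' with elapsed time
        start = time.time()
        logger(service, eventtype='start', **kwargs)
        try:
            yield
            logger(service, eventtype='success',
                   elapsedms=(time.time() - start) * 1000, **kwargs)
        except Exception as e:
            logger(service, eventtype='failure',
                   elapsedms=(time.time() - start) * 1000,
                   errormsg=str(e), **kwargs)
            raise

    def logger(service, **fields):
        print(service, fields)  # hypothetical stand-in for ui.log()

    with timedcall(logger, 'bundlestore', bundlesize=1024):
        pass  # the guarded operation, e.g. store.write(bundledata)
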
--- a/hgext/infinitepush/bundleparts.py	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-# Copyright 2017 Facebook, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-
-from mercurial.i18n import _
-from mercurial.node import hex
-
-from mercurial import (
-    bundle2,
-    changegroup,
-    error,
-    extensions,
-    revsetlang,
-    util,
-)
-
-from . import common
-
-isremotebooksenabled = common.isremotebooksenabled
-
-scratchbranchparttype = b'b2x:infinitepush'
-
-
-def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
-    if not outgoing.missing:
-        raise error.Abort(_(b'no commits to push'))
-
-    if scratchbranchparttype not in bundle2.bundle2caps(peer):
-        raise error.Abort(
-            _(b'no server support for %r') % scratchbranchparttype
-        )
-
-    _validaterevset(
-        repo, revsetlang.formatspec(b'%ln', outgoing.missing), bookmark
-    )
-
-    supportedversions = changegroup.supportedoutgoingversions(repo)
-    # Explicitly avoid using '01' changegroup version in infinitepush to
-    # support general delta
-    supportedversions.discard(b'01')
-    cgversion = min(supportedversions)
-    _handlelfs(repo, outgoing.missing)
-    cg = changegroup.makestream(repo, outgoing, cgversion, b'push')
-
-    params = {}
-    params[b'cgversion'] = cgversion
-    if bookmark:
-        params[b'bookmark'] = bookmark
-        # 'bookprevnode' is necessary for the pushkey reply part
-        params[b'bookprevnode'] = b''
-        bookmarks = repo._bookmarks
-        if bookmark in bookmarks:
-            params[b'bookprevnode'] = hex(bookmarks[bookmark])
-
-    # Do not send pushback bundle2 part with bookmarks if remotenames extension
-    # is enabled. It will be handled manually in `_push()`
-    if not isremotebooksenabled(ui):
-        params[b'pushbackbookmarks'] = b'1'
-
-    parts = []
-
-    # .upper() marks this as a mandatory part: the server will abort if
-    # there's no handler
-    parts.append(
-        bundle2.bundlepart(
-            scratchbranchparttype.upper(),
-            advisoryparams=params.items(),
-            data=cg,
-        )
-    )
-
-    return parts
-
-
-def _validaterevset(repo, revset, bookmark):
-    """Abort if the revs to be pushed aren't valid for a scratch branch."""
-    if not repo.revs(revset):
-        raise error.Abort(_(b'nothing to push'))
-    if bookmark:
-        # Allow bundle with many heads only if no bookmark is specified
-        heads = repo.revs(b'heads(%r)', revset)
-        if len(heads) > 1:
-            raise error.Abort(
-                _(b'cannot push more than one head to a scratch branch')
-            )
-
-
-def _handlelfs(repo, missing):
-    """Special case if lfs is enabled
-
-    If lfs is enabled then we need to call the prepush hook
-    to make sure large files are uploaded to lfs
-    """
-    try:
-        lfsmod = extensions.find(b'lfs')
-        lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
-    except KeyError:
-        # Ignore if lfs extension is not enabled
-        return
-
-
-class copiedpart:
-    """a copy of unbundlepart content that can be consumed later"""
-
-    def __init__(self, part):
-        # copy "public properties"
-        self.type = part.type
-        self.id = part.id
-        self.mandatory = part.mandatory
-        self.mandatoryparams = part.mandatoryparams
-        self.advisoryparams = part.advisoryparams
-        self.params = part.params
-        self.mandatorykeys = part.mandatorykeys
-        # copy the buffer
-        self._io = util.stringio(part.read())
-
-    def consume(self):
-        return
-
-    def read(self, size=None):
-        if size is None:
-            return self._io.read()
-        else:
-            return self._io.read(size)
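
The copiedpart class works because a bundle2 part's payload is a one-shot
stream: buffering it into memory makes it re-readable. A self-contained
sketch of the same idea, assuming only that the wrapped part exposes .type,
.params and a one-shot .read():

    import io

    class bufferedpart:
        """Buffer a part's one-shot payload so it can be read again later."""

        def __init__(self, part):
            self.type = part.type
            self.params = part.params
            self._io = io.BytesIO(part.read())  # drain the stream once

        def read(self, size=None):
            if size is None:
                return self._io.read()
            return self._io.read(size)
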
--- a/hgext/infinitepush/common.py	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-# Copyright 2017 Facebook, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-
-import os
-
-from mercurial.node import hex
-
-from mercurial import (
-    error,
-    extensions,
-    pycompat,
-)
-
-
-def isremotebooksenabled(ui):
-    return b'remotenames' in extensions._extensions and ui.configbool(
-        b'remotenames', b'bookmarks'
-    )
-
-
-def downloadbundle(repo, unknownbinhead):
-    index = repo.bundlestore.index
-    store = repo.bundlestore.store
-    bundleid = index.getbundle(hex(unknownbinhead))
-    if bundleid is None:
-        raise error.Abort(b'%s head is not known' % hex(unknownbinhead))
-    bundleraw = store.read(bundleid)
-    return _makebundlefromraw(bundleraw)
-
-
-def _makebundlefromraw(data):
-    fp = None
-    fd, bundlefile = pycompat.mkstemp()
-    try:  # guards bundlefile
-        try:  # guards fp
-            fp = os.fdopen(fd, 'wb')
-            fp.write(data)
-        finally:
-            fp.close()
-    except Exception:
-        try:
-            os.unlink(bundlefile)
-        except Exception:
-            # we would rather see the original exception
-            pass
-        raise
-
-    return bundlefile
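
_makebundlefromraw spells out a pattern this extension uses repeatedly:
write bytes to a fresh temporary file, return the path, and unlink the file
on failure so the original exception is the one the caller sees. A sketch
with the standard library in place of pycompat.mkstemp():

    import os
    import tempfile

    def makebundlefromraw(data):
        fd, bundlefile = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'wb') as fp:
                fp.write(data)
        except Exception:
            try:
                os.unlink(bundlefile)
            except OSError:
                pass  # prefer surfacing the original exception
            raise
        return bundlefile
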
--- a/hgext/infinitepush/fileindexapi.py	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-# Infinite push
-#
-# Copyright 2016 Facebook, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-"""
-    [infinitepush]
-    # Server-side option. Used only if indextype=disk.
-    # Filesystem path to the index store
-    indexpath = PATH
-"""
-
-
-import os
-
-from mercurial import util
-
-from mercurial.utils import stringutil
-
-from . import indexapi
-
-
-class fileindexapi(indexapi.indexapi):
-    def __init__(self, repo):
-        super(fileindexapi, self).__init__()
-        self._repo = repo
-        root = repo.ui.config(b'infinitepush', b'indexpath')
-        if not root:
-            root = os.path.join(b'scratchbranches', b'index')
-
-        self._nodemap = os.path.join(root, b'nodemap')
-        self._bookmarkmap = os.path.join(root, b'bookmarkmap')
-        self._metadatamap = os.path.join(root, b'nodemetadatamap')
-        self._lock = None
-
-    def __enter__(self):
-        self._lock = self._repo.wlock()
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        if self._lock:
-            self._lock.__exit__(exc_type, exc_val, exc_tb)
-
-    def addbundle(self, bundleid, nodesctx):
-        for node in nodesctx:
-            nodepath = os.path.join(self._nodemap, node.hex())
-            self._write(nodepath, bundleid)
-
-    def addbookmark(self, bookmark, node):
-        bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
-        self._write(bookmarkpath, node)
-
-    def addmanybookmarks(self, bookmarks):
-        for bookmark, node in bookmarks.items():
-            self.addbookmark(bookmark, node)
-
-    def deletebookmarks(self, patterns):
-        for pattern in patterns:
-            for bookmark, _ in self._listbookmarks(pattern):
-                bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
-                self._delete(bookmarkpath)
-
-    def getbundle(self, node):
-        nodepath = os.path.join(self._nodemap, node)
-        return self._read(nodepath)
-
-    def getnode(self, bookmark):
-        bookmarkpath = os.path.join(self._bookmarkmap, bookmark)
-        return self._read(bookmarkpath)
-
-    def getbookmarks(self, query):
-        return dict(self._listbookmarks(query))
-
-    def saveoptionaljsonmetadata(self, node, jsonmetadata):
-        vfs = self._repo.vfs
-        vfs.write(os.path.join(self._metadatamap, node), jsonmetadata)
-
-    def _listbookmarks(self, pattern):
-        if pattern.endswith(b'*'):
-            pattern = b're:^' + pattern[:-1] + b'.*'
-        kind, pat, matcher = stringutil.stringmatcher(pattern)
-        prefixlen = len(self._bookmarkmap) + 1
-        for dirpath, _, books in self._repo.vfs.walk(self._bookmarkmap):
-            for book in books:
-                bookmark = os.path.join(dirpath, book)[prefixlen:]
-                bookmark = util.pconvert(bookmark)
-                if not matcher(bookmark):
-                    continue
-                yield bookmark, self._read(os.path.join(dirpath, book))
-
-    def _write(self, path, value):
-        vfs = self._repo.vfs
-        dirname = vfs.dirname(path)
-        if not vfs.exists(dirname):
-            vfs.makedirs(dirname)
-
-        vfs.write(path, value)
-
-    def _read(self, path):
-        vfs = self._repo.vfs
-        if not vfs.exists(path):
-            return None
-        return vfs.read(path)
-
-    def _delete(self, path):
-        vfs = self._repo.vfs
-        if not vfs.exists(path):
-            return
-        return vfs.unlink(path)
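
_listbookmarks accepts simple trailing-`*` globs and rewrites them into the
anchored `re:` form understood by stringutil.stringmatcher. The rewrite in
isolation, on a concrete input:

    # A trailing '*' becomes an anchored regular expression. Note the
    # prefix is spliced in verbatim, so regex metacharacters in bookmark
    # names are not escaped (matching the code above).
    pattern = b'scratch/joe/*'
    if pattern.endswith(b'*'):
        pattern = b're:^' + pattern[:-1] + b'.*'
    assert pattern == b're:^scratch/joe/.*'
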
--- a/hgext/infinitepush/indexapi.py	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-# Infinite push
-#
-# Copyright 2016 Facebook, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-
-class indexapi:
-    """Class that manages access to infinitepush index.
-
-    This class is a context manager and all write operations (like
-    deletebookmarks, addbookmark, etc.) should use a `with` statement:
-
-      with index:
-          index.deletebookmarks(...)
-          ...
-    """
-
-    def __init__(self):
-        """Initializes the metadata store connection."""
-
-    def close(self):
-        """Cleans up the metadata store connection."""
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        pass
-
-    def addbundle(self, bundleid, nodesctx):
-        """Takes a bundleid and a list of node contexts for each node
-        in that bundle and records the mapping."""
-        raise NotImplementedError()
-
-    def addbookmark(self, bookmark, node):
-        """Takes a bookmark name and hash, and records mapping in the metadata
-        store."""
-        raise NotImplementedError()
-
-    def addmanybookmarks(self, bookmarks):
-        """Takes a dict with mapping from bookmark to hash and records mapping
-        in the metadata store."""
-        raise NotImplementedError()
-
-    def deletebookmarks(self, patterns):
-        """Accepts list of bookmarks and deletes them."""
-        raise NotImplementedError()
-
-    def getbundle(self, node):
-        """Returns the bundleid for the bundle that contains the given node."""
-        raise NotImplementedError()
-
-    def getnode(self, bookmark):
-        """Returns the node for the given bookmark. None if it doesn't exist."""
-        raise NotImplementedError()
-
-    def getbookmarks(self, query):
-        """Returns bookmarks that match the query"""
-        raise NotImplementedError()
-
-    def saveoptionaljsonmetadata(self, node, jsonmetadata):
-        """Saves optional metadata for a given node"""
-        raise NotImplementedError()
-
-
-class indexexception(Exception):
-    pass
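
The contract above is easiest to see in a toy implementation. A minimal
in-memory sketch for illustration only (fileindexapi and sqlindexapi are the
real disk- and MySQL-backed versions); `nodesctx` entries are assumed to
expose .hex(), as they do in the callers above:

    class memoryindexapi(indexapi):
        """Toy in-memory index; the remaining methods stay abstract."""

        def __init__(self):
            super().__init__()
            self._nodemap = {}    # node hex -> bundleid
            self._bookmarks = {}  # bookmark -> node hex

        def addbundle(self, bundleid, nodesctx):
            for ctx in nodesctx:
                self._nodemap[ctx.hex()] = bundleid

        def addbookmark(self, bookmark, node):
            self._bookmarks[bookmark] = node

        def addmanybookmarks(self, bookmarks):
            self._bookmarks.update(bookmarks)

        def getbundle(self, node):
            return self._nodemap.get(node)

        def getnode(self, bookmark):
            return self._bookmarks.get(bookmark)
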
--- a/hgext/infinitepush/schema.sql	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-CREATE TABLE `bookmarkstonode` (
-  `node` varbinary(64) NOT NULL,
-  `bookmark` varbinary(512) NOT NULL,
-  `reponame` varbinary(255) NOT NULL,
-  PRIMARY KEY (`reponame`,`bookmark`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-CREATE TABLE `bundles` (
-  `bundle` varbinary(512) NOT NULL,
-  `reponame` varbinary(255) NOT NULL,
-  PRIMARY KEY (`bundle`,`reponame`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-CREATE TABLE `nodestobundle` (
-  `node` varbinary(64) NOT NULL,
-  `bundle` varbinary(512) NOT NULL,
-  `reponame` varbinary(255) NOT NULL,
-  PRIMARY KEY (`node`,`reponame`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-
-CREATE TABLE `nodesmetadata` (
-  `node` varbinary(64) NOT NULL,
-  `message` mediumblob NOT NULL,
-  `p1` varbinary(64) NOT NULL,
-  `p2` varbinary(64) DEFAULT NULL,
-  `author` varbinary(255) NOT NULL,
-  `committer` varbinary(255) DEFAULT NULL,
-  `author_date` bigint(20) NOT NULL,
-  `committer_date` bigint(20) DEFAULT NULL,
-  `reponame` varbinary(255) NOT NULL,
-  `optional_json_metadata` mediumblob,
-  PRIMARY KEY (`reponame`,`node`)
-) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--- a/hgext/infinitepush/sqlindexapi.py	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,295 +0,0 @@
-# Infinite push
-#
-# Copyright 2016 Facebook, Inc.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-
-import logging
-import os
-import time
-
-import warnings
-import mysql.connector
-
-from . import indexapi
-
-
-def _convertbookmarkpattern(pattern):
-    pattern = pattern.replace(b'_', b'\\_')
-    pattern = pattern.replace(b'%', b'\\%')
-    if pattern.endswith(b'*'):
-        pattern = pattern[:-1] + b'%'
-    return pattern
-
-
-class sqlindexapi(indexapi.indexapi):
-    """
-    Sql backend for infinitepush index. See schema.sql
-    """
-
-    def __init__(
-        self,
-        reponame,
-        host,
-        port,
-        database,
-        user,
-        password,
-        logfile,
-        loglevel,
-        waittimeout=300,
-        locktimeout=120,
-    ):
-        super(sqlindexapi, self).__init__()
-        self.reponame = reponame
-        self.sqlargs = {
-            b'host': host,
-            b'port': port,
-            b'database': database,
-            b'user': user,
-            b'password': password,
-        }
-        self.sqlconn = None
-        self.sqlcursor = None
-        if not logfile:
-            logfile = os.devnull
-        logging.basicConfig(filename=logfile)
-        self.log = logging.getLogger()
-        self.log.setLevel(loglevel)
-        self._connected = False
-        self._waittimeout = waittimeout
-        self._locktimeout = locktimeout
-
-    def sqlconnect(self):
-        if self.sqlconn:
-            raise indexapi.indexexception(b"SQL connection already open")
-        if self.sqlcursor:
-            raise indexapi.indexexception(
-                b"SQL cursor already open without connection"
-            )
-        retry = 3
-        while True:
-            try:
-                self.sqlconn = mysql.connector.connect(**self.sqlargs)
-
-                # Code is copy-pasted from hgsql. Bug fixes need to be
-                # back-ported!
-                # The default behavior is to return byte arrays, but we
-                # need strings. This custom converter returns strings.
-                self.sqlconn.set_converter_class(CustomConverter)
-                self.sqlconn.autocommit = False
-                break
-            except mysql.connector.errors.Error:
-                # mysql can occasionally be flaky, so do some minimal
-                # retrying.
-                retry -= 1
-                if retry == 0:
-                    raise
-                time.sleep(0.2)
-
-        waittimeout = self.sqlconn.converter.escape(b'%s' % self._waittimeout)
-
-        self.sqlcursor = self.sqlconn.cursor()
-        self.sqlcursor.execute(b"SET wait_timeout=%s" % waittimeout)
-        self.sqlcursor.execute(
-            b"SET innodb_lock_wait_timeout=%s" % self._locktimeout
-        )
-        self._connected = True
-
-    def close(self):
-        """Cleans up the metadata store connection."""
-        with warnings.catch_warnings():
-            warnings.simplefilter(b"ignore")
-            self.sqlcursor.close()
-            self.sqlconn.close()
-        self.sqlcursor = None
-        self.sqlconn = None
-
-    def __enter__(self):
-        if not self._connected:
-            self.sqlconnect()
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        if exc_type is None:
-            self.sqlconn.commit()
-        else:
-            self.sqlconn.rollback()
-
-    def addbundle(self, bundleid, nodesctx):
-        if not self._connected:
-            self.sqlconnect()
-        self.log.info(b"ADD BUNDLE %r %r" % (self.reponame, bundleid))
-        self.sqlcursor.execute(
-            b"INSERT INTO bundles(bundle, reponame) VALUES (%s, %s)",
-            params=(bundleid, self.reponame),
-        )
-        for ctx in nodesctx:
-            self.sqlcursor.execute(
-                b"INSERT INTO nodestobundle(node, bundle, reponame) "
-                b"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE "
-                b"bundle=VALUES(bundle)",
-                params=(ctx.hex(), bundleid, self.reponame),
-            )
-
-            extra = ctx.extra()
-            author_name = ctx.user()
-            committer_name = extra.get(b'committer', ctx.user())
-            author_date = int(ctx.date()[0])
-            committer_date = int(extra.get(b'committer_date', author_date))
-            self.sqlcursor.execute(
-                b"INSERT IGNORE INTO nodesmetadata(node, message, p1, p2, "
-                b"author, committer, author_date, committer_date, "
-                b"reponame) VALUES "
-                b"(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
-                params=(
-                    ctx.hex(),
-                    ctx.description(),
-                    ctx.p1().hex(),
-                    ctx.p2().hex(),
-                    author_name,
-                    committer_name,
-                    author_date,
-                    committer_date,
-                    self.reponame,
-                ),
-            )
-
-    def addbookmark(self, bookmark, node):
-        """Takes a bookmark name and hash, and records mapping in the metadata
-        store."""
-        if not self._connected:
-            self.sqlconnect()
-        self.log.info(
-            b"ADD BOOKMARKS %r bookmark: %r node: %r"
-            % (self.reponame, bookmark, node)
-        )
-        self.sqlcursor.execute(
-            b"INSERT INTO bookmarkstonode(bookmark, node, reponame) "
-            b"VALUES (%s, %s, %s) ON DUPLICATE KEY UPDATE node=VALUES(node)",
-            params=(bookmark, node, self.reponame),
-        )
-
-    def addmanybookmarks(self, bookmarks):
-        if not self._connected:
-            self.sqlconnect()
-        args = []
-        values = []
-        for bookmark, node in bookmarks.items():
-            args.append(b'(%s, %s, %s)')
-            values.extend((bookmark, node, self.reponame))
-        args = b','.join(args)
-
-        self.sqlcursor.execute(
-            b"INSERT INTO bookmarkstonode(bookmark, node, reponame) "
-            b"VALUES %s ON DUPLICATE KEY UPDATE node=VALUES(node)" % args,
-            params=values,
-        )
-
-    def deletebookmarks(self, patterns):
-        """Accepts list of bookmark patterns and deletes them.
-        If `commit` is set then bookmark will actually be deleted. Otherwise
-        deletion will be delayed until the end of transaction.
-        """
-        if not self._connected:
-            self.sqlconnect()
-        self.log.info(b"DELETE BOOKMARKS: %s" % patterns)
-        for pattern in patterns:
-            pattern = _convertbookmarkpattern(pattern)
-            self.sqlcursor.execute(
-                b"DELETE from bookmarkstonode WHERE bookmark LIKE (%s) "
-                b"and reponame = %s",
-                params=(pattern, self.reponame),
-            )
-
-    def getbundle(self, node):
-        """Returns the bundleid for the bundle that contains the given node."""
-        if not self._connected:
-            self.sqlconnect()
-        self.log.info(b"GET BUNDLE %r %r" % (self.reponame, node))
-        self.sqlcursor.execute(
-            b"SELECT bundle from nodestobundle "
-            b"WHERE node = %s AND reponame = %s",
-            params=(node, self.reponame),
-        )
-        result = self.sqlcursor.fetchall()
-        if len(result) != 1 or len(result[0]) != 1:
-            self.log.info(b"No matching node")
-            return None
-        bundle = result[0][0]
-        self.log.info(b"Found bundle %r" % bundle)
-        return bundle
-
-    def getnode(self, bookmark):
-        """Returns the node for the given bookmark. None if it doesn't exist."""
-        if not self._connected:
-            self.sqlconnect()
-        self.log.info(
-            b"GET NODE reponame: %r bookmark: %r" % (self.reponame, bookmark)
-        )
-        self.sqlcursor.execute(
-            b"SELECT node from bookmarkstonode WHERE "
-            b"bookmark = %s AND reponame = %s",
-            params=(bookmark, self.reponame),
-        )
-        result = self.sqlcursor.fetchall()
-        if len(result) != 1 or len(result[0]) != 1:
-            self.log.info(b"No matching bookmark")
-            return None
-        node = result[0][0]
-        self.log.info(b"Found node %r" % node)
-        return node
-
-    def getbookmarks(self, query):
-        if not self._connected:
-            self.sqlconnect()
-        self.log.info(
-            b"QUERY BOOKMARKS reponame: %r query: %r" % (self.reponame, query)
-        )
-        query = _convertbookmarkpattern(query)
-        self.sqlcursor.execute(
-            b"SELECT bookmark, node from bookmarkstonode WHERE "
-            b"reponame = %s AND bookmark LIKE %s",
-            params=(self.reponame, query),
-        )
-        result = self.sqlcursor.fetchall()
-        bookmarks = {}
-        for row in result:
-            if len(row) != 2:
-                self.log.info(b"Bad row returned: %s" % row)
-                continue
-            bookmarks[row[0]] = row[1]
-        return bookmarks
-
-    def saveoptionaljsonmetadata(self, node, jsonmetadata):
-        if not self._connected:
-            self.sqlconnect()
-        self.log.info(
-            (
-                b"INSERT METADATA, QUERY BOOKMARKS reponame: %r "
-                + b"node: %r, jsonmetadata: %s"
-            )
-            % (self.reponame, node, jsonmetadata)
-        )
-
-        self.sqlcursor.execute(
-            b"UPDATE nodesmetadata SET optional_json_metadata=%s WHERE "
-            b"reponame=%s AND node=%s",
-            params=(jsonmetadata, self.reponame, node),
-        )
-
-
-class CustomConverter(mysql.connector.conversion.MySQLConverter):
-    """Ensure that all values being returned are returned as python string
-    (versus the default byte arrays)."""
-
-    def _STRING_to_python(self, value, dsc=None):
-        return str(value)
-
-    def _VAR_STRING_to_python(self, value, dsc=None):
-        return str(value)
-
-    def _BLOB_to_python(self, value, dsc=None):
-        return str(value)
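
_convertbookmarkpattern maps the extension's trailing-`*` globs onto SQL
LIKE syntax: literal `_` and `%` are escaped, and the glob star becomes `%`.
Worked through on a concrete input:

    pattern = b'scratch/me_*'
    pattern = pattern.replace(b'_', b'\\_')  # escape the LIKE wildcard '_'
    pattern = pattern.replace(b'%', b'\\%')  # no '%' here, unchanged
    if pattern.endswith(b'*'):
        pattern = pattern[:-1] + b'%'        # glob star -> LIKE '%'
    assert pattern == b'scratch/me\\_%'
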
--- a/hgext/infinitepush/store.py	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,194 +0,0 @@
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-# based on bundleheads extension by Gregory Szorc <gps@mozilla.com>
-
-
-import abc
-import os
-import subprocess
-
-from mercurial.node import hex
-from mercurial.pycompat import open
-from mercurial import pycompat
-from mercurial.utils import (
-    hashutil,
-    procutil,
-)
-
-
-class BundleWriteException(Exception):
-    pass
-
-
-class BundleReadException(Exception):
-    pass
-
-
-class abstractbundlestore:  # pytype: disable=ignored-metaclass
-    """Defines the interface for bundle stores.
-
-    A bundle store is an entity that stores raw bundle data. It is a simple
-    key-value store. However, the keys are chosen by the store. The keys can
-    be any Python object understood by the corresponding bundle index (see
-    ``abstractbundleindex`` below).
-    """
-
-    __metaclass__ = abc.ABCMeta
-
-    @abc.abstractmethod
-    def write(self, data):
-        """Write bundle data to the store.
-
-        This function receives the raw data to be written as a str.
-        Throws BundleWriteException
-        The key of the written data MUST be returned.
-        """
-
-    @abc.abstractmethod
-    def read(self, key):
-        """Obtain bundle data for a key.
-
-        Returns None if the bundle isn't known.
-        Throws BundleReadException
-        The returned object should be a file object supporting read()
-        and close().
-        """
-
-
-class filebundlestore:
-    """bundle store in filesystem
-
-    meant for storing bundles somewhere on disk and on network filesystems
-    """
-
-    def __init__(self, ui, repo):
-        self.ui = ui
-        self.repo = repo
-        self.storepath = ui.configpath(b'scratchbranch', b'storepath')
-        if not self.storepath:
-            self.storepath = self.repo.vfs.join(
-                b"scratchbranches", b"filebundlestore"
-            )
-        if not os.path.exists(self.storepath):
-            os.makedirs(self.storepath)
-
-    def _dirpath(self, hashvalue):
-        """First two bytes of the hash are the name of the upper
-        level directory, next two bytes are the name of the
-        next level directory"""
-        return os.path.join(self.storepath, hashvalue[0:2], hashvalue[2:4])
-
-    def _filepath(self, filename):
-        return os.path.join(self._dirpath(filename), filename)
-
-    def write(self, data):
-        filename = hex(hashutil.sha1(data).digest())
-        dirpath = self._dirpath(filename)
-
-        if not os.path.exists(dirpath):
-            os.makedirs(dirpath)
-
-        with open(self._filepath(filename), b'wb') as f:
-            f.write(data)
-
-        return filename
-
-    def read(self, key):
-        try:
-            with open(self._filepath(key), b'rb') as f:
-                return f.read()
-        except IOError:
-            return None
-
-
-def format_placeholders_args(args, filename=None, handle=None):
-    """Formats `args` with Infinitepush replacements.
-
-    Hack to get `str.format()`-ed strings working in a backwards-compatible
-    way with bytes.
-    """
-    formatted_args = []
-    for arg in args:
-        if filename and arg == b'{filename}':
-            formatted_args.append(filename)
-        elif handle and arg == b'{handle}':
-            formatted_args.append(handle)
-        else:
-            formatted_args.append(arg)
-    return formatted_args
-
-
-class externalbundlestore(abstractbundlestore):
-    def __init__(self, put_binary, put_args, get_binary, get_args):
-        """
-        `put_binary` - path to binary file which uploads bundle to external
-            storage and prints key to stdout
-        `put_args` - format string with additional args to `put_binary`
-                     {filename} replacement field can be used.
-        `get_binary` - path to binary file which accepts filename and key
-            (in that order), downloads bundle from store and saves it to file
-        `get_args` - format string with additional args to `get_binary`.
-                     {filename} and {handle} replacement fields can be used.
-        """
-
-        self.put_args = put_args
-        self.get_args = get_args
-        self.put_binary = put_binary
-        self.get_binary = get_binary
-
-    def _call_binary(self, args):
-        p = subprocess.Popen(
-            pycompat.rapply(procutil.tonativestr, args),
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            close_fds=True,
-        )
-        stdout, stderr = p.communicate()
-        returncode = p.returncode
-        return returncode, stdout, stderr
-
-    def write(self, data):
-        # Won't work on Windows because you can't open a file a second time
-        # without closing it
-        # TODO: rewrite without str.format() and replace NamedTemporaryFile()
-        # with pycompat.namedtempfile()
-        with pycompat.namedtempfile() as temp:
-            temp.write(data)
-            temp.flush()
-            temp.seek(0)
-            formatted_args = format_placeholders_args(
-                self.put_args, filename=temp.name
-            )
-            returncode, stdout, stderr = self._call_binary(
-                [self.put_binary] + formatted_args
-            )
-
-            if returncode != 0:
-                raise BundleWriteException(
-                    b'Failed to upload to external store: %s' % stderr
-                )
-            stdout_lines = stdout.splitlines()
-            if len(stdout_lines) == 1:
-                return stdout_lines[0]
-            else:
-                raise BundleWriteException(
-                    b'Bad output from %s: %s' % (self.put_binary, stdout)
-                )
-
-    def read(self, handle):
-        # Won't work on Windows because you can't open a file a second time
-        # without closing it
-        with pycompat.namedtempfile() as temp:
-            formatted_args = format_placeholders_args(
-                self.get_args, filename=temp.name, handle=handle
-            )
-            returncode, stdout, stderr = self._call_binary(
-                [self.get_binary] + formatted_args
-            )
-
-            if returncode != 0:
-                raise BundleReadException(
-                    b'Failed to download from external store: %s' % stderr
-                )
-            return temp.read()
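
filebundlestore is content-addressed: the key returned by write() is the
SHA-1 of the bundle data, and its first four hex digits shard the files
across two directory levels. A sketch of the resulting path, using the
standard library's hashlib in place of Mercurial's hashutil:

    import hashlib
    import os

    data = b'raw bundle payload'
    key = hashlib.sha1(data).hexdigest()
    # e.g. storepath/<key[0:2]>/<key[2:4]>/<full 40-char key>
    path = os.path.join('storepath', key[0:2], key[2:4], key)
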
--- a/hgext/journal.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/journal.py	Tue Nov 07 15:21:11 2023 +0100
@@ -66,13 +66,13 @@
 
 # Journal recording, register hooks and storage object
 def extsetup(ui):
-    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
-    extensions.wrapfunction(bookmarks.bmstore, b'_write', recordbookmarks)
+    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
+    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
     extensions.wrapfilecache(
         localrepo.localrepository, b'dirstate', wrapdirstate
     )
-    extensions.wrapfunction(hg, b'postshare', wrappostshare)
-    extensions.wrapfunction(hg, b'copystore', unsharejournal)
+    extensions.wrapfunction(hg, 'postshare', wrappostshare)
+    extensions.wrapfunction(hg, 'copystore', unsharejournal)
 
 
 def reposetup(ui, repo):
@@ -103,7 +103,7 @@
 def wrapdirstate(orig, repo):
     """Make journal storage available to the dirstate object"""
     dirstate = orig(repo)
-    if util.safehasattr(repo, 'journal'):
+    if hasattr(repo, 'journal'):
         _setupdirstate(repo, dirstate)
     return dirstate
 
@@ -112,7 +112,7 @@
     """Records all dirstate parent changes in the journal."""
     old = list(old)
     new = list(new)
-    if util.safehasattr(dirstate, 'journalstorage'):
+    if hasattr(dirstate, 'journalstorage'):
         # only record two hashes if there was a merge
         oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
         newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
@@ -125,9 +125,12 @@
 def recordbookmarks(orig, store, fp):
     """Records all bookmark changes in the journal."""
     repo = store._repo
-    if util.safehasattr(repo, 'journal'):
+    if hasattr(repo, 'journal'):
         oldmarks = bookmarks.bmstore(repo)
-        for mark, value in store.items():
+        all_marks = set(b for b, n in oldmarks.items())
+        all_marks.update(b for b, n in store.items())
+        for mark in sorted(all_marks):
+            value = store.get(mark, repo.nullid)
             oldvalue = oldmarks.get(mark, repo.nullid)
             if value != oldvalue:
                 repo.journal.record(bookmarktype, mark, oldvalue, value)
@@ -182,11 +185,7 @@
 
 def unsharejournal(orig, ui, repo, repopath):
     """Copy shared journal entries into this repo when unsharing"""
-    if (
-        repo.path == repopath
-        and repo.shared()
-        and util.safehasattr(repo, 'journal')
-    ):
+    if repo.path == repopath and repo.shared() and hasattr(repo, 'journal'):
         sharedrepo = hg.sharedreposource(repo)
         sharedfeatures = _readsharedfeatures(repo)
         if sharedrepo and sharedfeatures > {b'journal'}:
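
The recordbookmarks change above fixes a blind spot: iterating only over the
new store missed bookmarks that had been deleted outright. Walking the union
of old and new names turns a deletion into a recordable transition to the
null node. The logic in isolation, with plain dicts standing in for the
bookmark stores:

    nullid = b'\x00' * 20  # stand-in for repo.nullid

    oldmarks = {b'feature': b'\xaa' * 20, b'keep': b'\xbb' * 20}
    newmarks = {b'keep': b'\xbb' * 20}  # b'feature' was deleted

    for mark in sorted(set(oldmarks) | set(newmarks)):
        value = newmarks.get(mark, nullid)
        oldvalue = oldmarks.get(mark, nullid)
        if value != oldvalue:
            # b'feature' is reported as a transition to the null node
            print(mark, oldvalue.hex(), '->', value.hex())
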
--- a/hgext/keyword.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/keyword.py	Tue Nov 07 15:21:11 2023 +0100
@@ -88,7 +88,6 @@
 import weakref
 
 from mercurial.i18n import _
-from mercurial.pycompat import getattr
 from mercurial.hgweb import webcommands
 
 from mercurial import (
@@ -131,7 +130,7 @@
 )
 
 # webcommands that do not act on keywords
-nokwwebcommands = b'annotate changeset rev filediff diff comparison'
+nokwwebcommands = 'annotate changeset rev filediff diff comparison'
 
 # hg commands that trigger expansion only when writing to working dir,
 # not when reading filelog, and unexpand when reading from working dir
@@ -420,11 +419,10 @@
     """Bails out if [keyword] configuration is not active.
     Returns status of working directory."""
     if kwt:
-        opts = pycompat.byteskwargs(opts)
         return repo.status(
-            match=scmutil.match(wctx, pats, opts),
+            match=scmutil.match(wctx, pats, pycompat.byteskwargs(opts)),
             clean=True,
-            unknown=opts.get(b'unknown') or opts.get(b'all'),
+            unknown=opts.get('unknown') or opts.get('all'),
         )
     if ui.configitems(b'keyword'):
         raise error.Abort(_(b'[keyword] patterns cannot match'))
@@ -604,26 +602,26 @@
     else:
         cwd = b''
     files = []
-    opts = pycompat.byteskwargs(opts)
-    if not opts.get(b'unknown') or opts.get(b'all'):
+
+    if not opts.get('unknown') or opts.get('all'):
         files = sorted(status.modified + status.added + status.clean)
     kwfiles = kwt.iskwfile(files, wctx)
     kwdeleted = kwt.iskwfile(status.deleted, wctx)
     kwunknown = kwt.iskwfile(status.unknown, wctx)
-    if not opts.get(b'ignore') or opts.get(b'all'):
+    if not opts.get('ignore') or opts.get('all'):
         showfiles = kwfiles, kwdeleted, kwunknown
     else:
         showfiles = [], [], []
-    if opts.get(b'all') or opts.get(b'ignore'):
+    if opts.get('all') or opts.get('ignore'):
         showfiles += (
             [f for f in files if f not in kwfiles],
             [f for f in status.unknown if f not in kwunknown],
         )
     kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
     kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
-    fm = ui.formatter(b'kwfiles', opts)
+    fm = ui.formatter(b'kwfiles', pycompat.byteskwargs(opts))
     fmt = b'%.0s%s\n'
-    if opts.get(b'all') or ui.verbose:
+    if opts.get('all') or ui.verbose:
         fmt = b'%s %s\n'
     for kwstate, char, filenames in kwstates:
         label = b'kwfiles.' + kwstate
@@ -806,14 +804,14 @@
         kwtools[b'hgcmd'] = cmd
         return cmd, func, args, options, cmdoptions
 
-    extensions.wrapfunction(dispatch, b'_parse', kwdispatch_parse)
+    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
 
-    extensions.wrapfunction(context.filectx, b'cmp', kwfilectx_cmp)
-    extensions.wrapfunction(patch.patchfile, b'__init__', kwpatchfile_init)
-    extensions.wrapfunction(patch, b'diff', kwdiff)
-    extensions.wrapfunction(cmdutil, b'amend', kw_amend)
-    extensions.wrapfunction(cmdutil, b'copy', kw_copy)
-    extensions.wrapfunction(cmdutil, b'dorecord', kw_dorecord)
+    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
+    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
+    extensions.wrapfunction(patch, 'diff', kwdiff)
+    extensions.wrapfunction(cmdutil, 'amend', kw_amend)
+    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
+    extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
     for c in nokwwebcommands.split():
         extensions.wrapfunction(webcommands, c, kwweb_skip)
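
The keyword.py hunks follow the same recipe as the rest of this series:
`**opts` arrives with native str keys, so options are read with str keys
directly, and pycompat.byteskwargs() is applied only at call sites that
still expect a bytes-keyed dict. A minimal stand-in illustrating that
boundary:

    def byteskwargs(opts):
        # simplified stand-in for pycompat.byteskwargs()
        return {k.encode('ascii'): v for k, v in opts.items()}

    def kwfiles_like(**opts):
        show_all = opts.get('all')  # str keys, read directly
        bopts = byteskwargs(opts)   # bytes keys only where still required
        return show_all, bopts

    print(kwfiles_like(all=True, unknown=False))
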
 
--- a/hgext/largefiles/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/largefiles/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -184,7 +184,7 @@
     )
 
     extensions.wrapfunction(
-        wireprotov1server.commands[b'heads'], b'func', proto.heads
+        wireprotov1server.commands[b'heads'], 'func', proto.heads
     )
     # TODO also wrap wireproto.commandsv2 once heads is implemented there.
 
@@ -193,7 +193,7 @@
         if name == b'rebase':
             # TODO: teach exthelper to handle this
             extensions.wrapfunction(
-                module, b'rebase', overrides.overriderebasecmd
+                module, 'rebase', overrides.overriderebasecmd
             )
 
 
--- a/hgext/largefiles/lfcommands.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/largefiles/lfcommands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -27,7 +27,6 @@
     lock,
     logcmdutil,
     match as matchmod,
-    pycompat,
     scmutil,
     util,
 )
@@ -87,12 +86,11 @@
     Use --to-normal to convert largefiles back to normal files; after
     this, the DEST repository can be used without largefiles at all."""
 
-    opts = pycompat.byteskwargs(opts)
-    if opts[b'to_normal']:
+    if opts['to_normal']:
         tolfile = False
     else:
         tolfile = True
-        size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
+        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
 
     if not hg.islocal(src):
         raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
--- a/hgext/largefiles/lfutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/largefiles/lfutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -814,7 +814,7 @@
     Otherwise, this returns the function to always write out (or
     ignore if ``not forcibly``) status.
     """
-    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
+    if forcibly is None and hasattr(repo, '_largefilesenabled'):
         return repo._lfstatuswriters[-1]
     else:
         if forcibly:
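
The safehasattr-to-hasattr swap here (and throughout this merge) is safe
because Python 3's hasattr() only swallows AttributeError, whereas Python
2's swallowed every exception, which is what util.safehasattr existed to
avoid. A quick demonstration of the Python 3 behavior this relies on:

    class Flaky:
        @property
        def journal(self):
            raise KeyError('boom')

    try:
        hasattr(Flaky(), 'journal')
    except KeyError:
        # the unexpected error propagates instead of being silently
        # reported as "attribute missing"
        print('non-AttributeError propagates')
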
--- a/hgext/largefiles/overrides.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/largefiles/overrides.py	Tue Nov 07 15:21:11 2023 +0100
@@ -243,7 +243,7 @@
 
 # For overriding mercurial.hgweb.webcommands so that largefiles will
 # appear at their right place in the manifests.
-@eh.wrapfunction(webcommands, b'decodepath')
+@eh.wrapfunction(webcommands, 'decodepath')
 def decodepath(orig, path):
     return lfutil.splitstandin(path) or path
 
@@ -273,7 +273,7 @@
     return orig(ui, repo, *pats, **opts)
 
 
-@eh.wrapfunction(cmdutil, b'add')
+@eh.wrapfunction(cmdutil, 'add')
 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
     # The --normal flag short circuits this override
     if opts.get('normal'):
@@ -289,7 +289,7 @@
     return bad
 
 
-@eh.wrapfunction(cmdutil, b'remove')
+@eh.wrapfunction(cmdutil, 'remove')
 def cmdutilremove(
     orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
 ):
@@ -313,7 +313,7 @@
     )
 
 
-@eh.wrapfunction(dirstate.dirstate, b'_changing')
+@eh.wrapfunction(dirstate.dirstate, '_changing')
 @contextlib.contextmanager
 def _changing(orig, self, repo, change_type):
     pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
@@ -334,7 +334,7 @@
         self._sub_dirstate = pre
 
 
-@eh.wrapfunction(dirstate.dirstate, b'running_status')
+@eh.wrapfunction(dirstate.dirstate, 'running_status')
 @contextlib.contextmanager
 def running_status(orig, self, repo):
     pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
@@ -355,7 +355,7 @@
         self._sub_dirstate = pre
 
 
-@eh.wrapfunction(subrepo.hgsubrepo, b'status')
+@eh.wrapfunction(subrepo.hgsubrepo, 'status')
 def overridestatusfn(orig, repo, rev2, **opts):
     with lfstatus(repo._repo):
         return orig(repo, rev2, **opts)
@@ -367,7 +367,7 @@
         return orig(ui, repo, *pats, **opts)
 
 
-@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
+@eh.wrapfunction(subrepo.hgsubrepo, 'dirty')
 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
     with lfstatus(repo._repo):
         return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
@@ -485,10 +485,10 @@
         return lambda ctx: match
 
     wrappedmatchandpats = extensions.wrappedfunction(
-        scmutil, b'matchandpats', overridematchandpats
+        scmutil, 'matchandpats', overridematchandpats
     )
     wrappedmakefilematcher = extensions.wrappedfunction(
-        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
+        logcmdutil, '_makenofollowfilematcher', overridemakefilematcher
     )
     with wrappedmatchandpats, wrappedmakefilematcher:
         return orig(ui, repo, *pats, **opts)
@@ -554,7 +554,7 @@
 # The overridden function filters the unknown files by removing any
 # largefiles. This makes the merge proceed and we can then handle this
 # case further in the overridden calculateupdates function below.
-@eh.wrapfunction(merge, b'_checkunknownfile')
+@eh.wrapfunction(merge, '_checkunknownfile')
 def overridecheckunknownfile(
     origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
 ):
@@ -589,7 +589,7 @@
 # Finally, the merge.applyupdates function will then take care of
 # writing the files into the working copy and lfcommands.updatelfiles
 # will update the largefiles.
-@eh.wrapfunction(merge, b'calculateupdates')
+@eh.wrapfunction(merge, 'calculateupdates')
 def overridecalculateupdates(
     origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
 ):
@@ -700,7 +700,7 @@
     return mresult
 
 
-@eh.wrapfunction(mergestatemod, b'recordupdates')
+@eh.wrapfunction(mergestatemod, 'recordupdates')
 def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
     if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
         lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
@@ -716,7 +716,7 @@
 
 # Override filemerge to prompt the user about how they wish to merge
 # largefiles. This will handle identical edits without prompting the user.
-@eh.wrapfunction(filemerge, b'filemerge')
+@eh.wrapfunction(filemerge, 'filemerge')
 def overridefilemerge(
     origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
 ):
@@ -748,7 +748,7 @@
     return 0, False
 
 
-@eh.wrapfunction(copiesmod, b'pathcopies')
+@eh.wrapfunction(copiesmod, 'pathcopies')
 def copiespathcopies(orig, ctx1, ctx2, match=None):
     copies = orig(ctx1, ctx2, match=match)
     updated = {}
@@ -764,7 +764,7 @@
 # checks if the destination largefile already exists. It also keeps a
 # list of copied files so that the largefiles can be copied and the
 # dirstate updated.
-@eh.wrapfunction(cmdutil, b'copy')
+@eh.wrapfunction(cmdutil, 'copy')
 def overridecopy(orig, ui, repo, pats, opts, rename=False):
     # doesn't remove largefile on rename
     if len(pats) < 2:
@@ -793,7 +793,7 @@
         match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
         return composenormalfilematcher(match, manifest)
 
-    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
+    with extensions.wrappedfunction(scmutil, 'match', normalfilesmatchfn):
         try:
             result = orig(ui, repo, pats, opts, rename)
         except error.Abort as e:
@@ -887,8 +887,8 @@
             copiedfiles.append((src, dest))
             orig(src, dest, *args, **kwargs)
 
-        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
-            with extensions.wrappedfunction(scmutil, b'match', overridematch):
+        with extensions.wrappedfunction(util, 'copyfile', overridecopyfile):
+            with extensions.wrappedfunction(scmutil, 'match', overridematch):
                 result += orig(ui, repo, listpats, opts, rename)
 
         lfdirstate = lfutil.openlfdirstate(ui, repo)
@@ -936,7 +936,7 @@
 # commits. Update the standins then run the original revert, changing
 # the matcher to hit standins instead of largefiles. Based on the
 # resulting standins update the largefiles.
-@eh.wrapfunction(cmdutil, b'revert')
+@eh.wrapfunction(cmdutil, 'revert')
 def overriderevert(orig, ui, repo, ctx, *pats, **opts):
     # Because we put the standins in a bad state (by updating them)
     # and then return them to a correct state we need to lock to
@@ -999,7 +999,7 @@
             m.matchfn = matchfn
             return m
 
-        with extensions.wrappedfunction(scmutil, b'match', overridematch):
+        with extensions.wrappedfunction(scmutil, 'match', overridematch):
             orig(ui, repo, ctx, *pats, **opts)
 
         newstandins = lfutil.getstandinsstate(repo)
@@ -1079,7 +1079,7 @@
     return orig(ui, repo, *args, **kwargs)
 
 
-@eh.wrapfunction(exchange, b'pushoperation')
+@eh.wrapfunction(exchange, 'pushoperation')
 def exchangepushoperation(orig, *args, **kwargs):
     """Override pushoperation constructor and store lfrevs parameter"""
     lfrevs = kwargs.pop('lfrevs', None)
@@ -1139,7 +1139,7 @@
     return orig(ui, source, dest, **opts)
 
 
-@eh.wrapfunction(hg, b'clone')
+@eh.wrapfunction(hg, 'clone')
 def hgclone(orig, ui, opts, *args, **kwargs):
     result = orig(ui, opts, *args, **kwargs)
 
@@ -1167,7 +1167,7 @@
 
 @eh.wrapcommand(b'rebase', extension=b'rebase')
 def overriderebasecmd(orig, ui, repo, **opts):
-    if not util.safehasattr(repo, b'_largefilesenabled'):
+    if not hasattr(repo, '_largefilesenabled'):
         return orig(ui, repo, **opts)
 
     resuming = opts.get('continue')
@@ -1195,7 +1195,7 @@
             kwargs['inmemory'] = False
             return orig(*args, **kwargs)
 
-        extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
+        extensions.wrapfunction(rebase, '_dorebase', _dorebase)
 
 
 @eh.wrapcommand(b'archive')
@@ -1204,13 +1204,13 @@
         return orig(ui, repo.unfiltered(), dest, **opts)
 
 
-@eh.wrapfunction(webcommands, b'archive')
+@eh.wrapfunction(webcommands, 'archive')
 def hgwebarchive(orig, web):
     with lfstatus(web.repo):
         return orig(web)
 
 
-@eh.wrapfunction(archival, b'archive')
+@eh.wrapfunction(archival, 'archive')
 def overridearchive(
     orig,
     repo,
@@ -1298,7 +1298,7 @@
             # allow only hgsubrepos to set this, instead of the current scheme
             # where the parent sets this for the child.
             with (
-                util.safehasattr(sub, '_repo')
+                hasattr(sub, '_repo')
                 and lfstatus(sub._repo)
                 or util.nullcontextmanager()
             ):
@@ -1307,9 +1307,9 @@
     archiver.done()
 
 
-@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
+@eh.wrapfunction(subrepo.hgsubrepo, 'archive')
 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
-    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
+    lfenabled = hasattr(repo._repo, '_largefilesenabled')
     if not lfenabled or not repo._repo.lfstatus:
         return orig(repo, archiver, prefix, match, decode)
 
@@ -1364,7 +1364,7 @@
         # would allow only hgsubrepos to set this, instead of the current scheme
         # where the parent sets this for the child.
         with (
-            util.safehasattr(sub, '_repo')
+            hasattr(sub, '_repo')
             and lfstatus(sub._repo)
             or util.nullcontextmanager()
         ):
@@ -1375,7 +1375,7 @@
 # standin until a commit. cmdutil.bailifchanged() raises an exception
 # if the repo has uncommitted changes. Wrap it to also check if
 # largefiles were changed. This is used by bisect, backout and fetch.
-@eh.wrapfunction(cmdutil, b'bailifchanged')
+@eh.wrapfunction(cmdutil, 'bailifchanged')
 def overridebailifchanged(orig, repo, *args, **kwargs):
     orig(repo, *args, **kwargs)
     with lfstatus(repo):
@@ -1384,13 +1384,13 @@
         raise error.Abort(_(b'uncommitted changes'))
 
 
-@eh.wrapfunction(cmdutil, b'postcommitstatus')
+@eh.wrapfunction(cmdutil, 'postcommitstatus')
 def postcommitstatus(orig, repo, *args, **kwargs):
     with lfstatus(repo):
         return orig(repo, *args, **kwargs)
 
 
-@eh.wrapfunction(cmdutil, b'forget')
+@eh.wrapfunction(cmdutil, 'forget')
 def cmdutilforget(
     orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
 ):
@@ -1559,7 +1559,7 @@
         orig(ui, repo, *pats, **opts)
 
 
-@eh.wrapfunction(scmutil, b'addremove')
+@eh.wrapfunction(scmutil, 'addremove')
 def scmutiladdremove(
     orig,
     repo,
@@ -1717,11 +1717,10 @@
 
 @eh.wrapcommand(b'cat')
 def overridecat(orig, ui, repo, file1, *pats, **opts):
-    opts = pycompat.byteskwargs(opts)
-    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
+    ctx = logcmdutil.revsingle(repo, opts.get('rev'))
     err = 1
     notbad = set()
-    m = scmutil.match(ctx, (file1,) + pats, opts)
+    m = scmutil.match(ctx, (file1,) + pats, pycompat.byteskwargs(opts))
     origmatchfn = m.matchfn
 
     def lfmatchfn(f):
@@ -1758,12 +1757,12 @@
     m.visitdir = lfvisitdirfn
 
     for f in ctx.walk(m):
-        with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
+        with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp:
             lf = lfutil.splitstandin(f)
             if lf is None or origmatchfn(f):
                 # duplicating unreachable code from commands.cat
                 data = ctx[f].data()
-                if opts.get(b'decode'):
+                if opts.get('decode'):
                     data = repo.wwritedata(f, data)
                 fp.write(data)
             else:
@@ -1787,7 +1786,7 @@
     return err
 
 
-@eh.wrapfunction(merge, b'_update')
+@eh.wrapfunction(merge, '_update')
 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
     matcher = kwargs.get('matcher', None)
     # note if this is a partial update
@@ -1880,7 +1879,7 @@
         return result
 
 
-@eh.wrapfunction(scmutil, b'marktouched')
+@eh.wrapfunction(scmutil, 'marktouched')
 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
     result = orig(repo, files, *args, **kwargs)
 
@@ -1901,8 +1900,8 @@
     return result
 
 
-@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
-@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
+@eh.wrapfunction(upgrade_actions, 'preservedrequirements')
+@eh.wrapfunction(upgrade_actions, 'supporteddestrequirements')
 def upgraderequirements(orig, repo):
     reqs = orig(repo)
     if b'largefiles' in repo.requirements:
@@ -1913,7 +1912,7 @@
 _lfscheme = b'largefile://'
 
 
-@eh.wrapfunction(urlmod, b'open')
+@eh.wrapfunction(urlmod, 'open')
 def openlargefile(orig, ui, url_, data=None, **kwargs):
     if url_.startswith(_lfscheme):
         if data:
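
overrides.py repeatedly uses extensions.wrappedfunction() as a context manager so that a wrapper is installed only around a single call (scmutil.match, util.copyfile, ...). A hedged model of that scoped-patching idea, simplified from the real helper:

import contextlib

@contextlib.contextmanager
def wrappedfunction(container, funcname, wrapper):
    origfn = getattr(container, funcname)

    def wrapped(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)

    setattr(container, funcname, wrapped)
    try:
        yield
    finally:
        # Restore the original even if the wrapped call raised.
        setattr(container, funcname, origfn)
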
--- a/hgext/largefiles/proto.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/largefiles/proto.py	Tue Nov 07 15:21:11 2023 +0100
@@ -200,7 +200,7 @@
 
 
 # advertise the largefiles=serve capability
-@eh.wrapfunction(wireprotov1server, b'_capabilities')
+@eh.wrapfunction(wireprotov1server, '_capabilities')
 def _capabilities(orig, repo, proto):
     '''announce largefile server capability'''
     caps = orig(repo, proto)
--- a/hgext/largefiles/storefactory.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/largefiles/storefactory.py	Tue Nov 07 15:21:11 2023 +0100
@@ -5,7 +5,6 @@
 import re
 
 from mercurial.i18n import _
-from mercurial.pycompat import getattr
 from mercurial import (
     error,
     hg,
@@ -57,7 +56,7 @@
 
     # The path could be a scheme so use Mercurial's normal functionality
     # to resolve the scheme to a repository and use its path
-    path = util.safehasattr(remote, b'url') and remote.url() or remote.path
+    path = hasattr(remote, 'url') and remote.url() or remote.path
 
     match = _scheme_re.match(path)
     if not match:  # regular filesystem path
--- a/hgext/lfs/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/lfs/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -342,7 +342,7 @@
     wrapfunction(filelog, 'size', wrapper.filelogsize)
 
 
-@eh.wrapfunction(localrepo, b'resolverevlogstorevfsoptions')
+@eh.wrapfunction(localrepo, 'resolverevlogstorevfsoptions')
 def _resolverevlogstorevfsoptions(orig, ui, requirements, features):
     opts = orig(ui, requirements, features)
     for name, module in extensions.extensions(ui):
--- a/hgext/lfs/blobstore.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/lfs/blobstore.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,6 @@
 import socket
 
 from mercurial.i18n import _
-from mercurial.pycompat import getattr
 from mercurial.node import hex
 
 from mercurial import (
@@ -271,7 +270,7 @@
     if isinstance(urlerror.reason, Exception):
         inst = urlerror.reason
 
-    if util.safehasattr(inst, b'reason'):
+    if hasattr(inst, 'reason'):
         try:  # usually it is in the form (errno, strerror)
             reason = inst.reason.args[1]
         except (AttributeError, IndexError):
@@ -751,7 +750,7 @@
     if lfsurl is None:
         if remote:
             path = remote
-        elif util.safehasattr(repo, b'_subtoppath'):
+        elif hasattr(repo, '_subtoppath'):
             # The pull command sets this during the optional update phase, which
             # tells exactly where the pull originated, whether 'paths.default'
             # or explicit.
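
The blobstore hunk above probes the nested exception with the builtin and then relies on the common (errno, strerror) shape of socket errors. A self-contained sketch of the same unwrapping, under a hypothetical helper name:

def friendly_reason(urlerror):
    # Prefer the wrapped exception when URLError carries one.
    inst = urlerror
    if isinstance(urlerror.reason, Exception):
        inst = urlerror.reason
    if hasattr(inst, 'reason'):
        try:  # usually (errno, strerror), but not guaranteed
            return inst.reason.args[1]
        except (AttributeError, IndexError):
            pass
    return str(inst)
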
--- a/hgext/lfs/wireprotolfsserver.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/lfs/wireprotolfsserver.py	Tue Nov 07 15:21:11 2023 +0100
@@ -16,7 +16,6 @@
 from mercurial import (
     exthelper,
     pycompat,
-    util,
     wireprotoserver,
 )
 
@@ -33,7 +32,7 @@
 eh = exthelper.exthelper()
 
 
-@eh.wrapfunction(wireprotoserver, b'handlewsgirequest')
+@eh.wrapfunction(wireprotoserver, 'handlewsgirequest')
 def handlewsgirequest(orig, rctx, req, res, checkperm):
     """Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS
     request if it is left unprocessed by the wrapped method.
@@ -44,7 +43,7 @@
     if not rctx.repo.ui.configbool(b'experimental', b'lfs.serve'):
         return False
 
-    if not util.safehasattr(rctx.repo.svfs, 'lfslocalblobstore'):
+    if not hasattr(rctx.repo.svfs, 'lfslocalblobstore'):
         return False
 
     if not req.dispatchpath:
--- a/hgext/lfs/wrapper.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/lfs/wrapper.py	Tue Nov 07 15:21:11 2023 +0100
@@ -10,10 +10,6 @@
 
 from mercurial.i18n import _
 from mercurial.node import bin, hex, short
-from mercurial.pycompat import (
-    getattr,
-    setattr,
-)
 
 from mercurial import (
     bundle2,
@@ -26,7 +22,6 @@
     localrepo,
     revlog,
     scmutil,
-    util,
     vfs as vfsmod,
     wireprotov1server,
 )
@@ -53,7 +48,7 @@
 eh = exthelper.exthelper()
 
 
-@eh.wrapfunction(localrepo, b'makefilestorage')
+@eh.wrapfunction(localrepo, 'makefilestorage')
 def localrepomakefilestorage(orig, requirements, features, **kwargs):
     if b'lfs' in requirements:
         features.add(repository.REPO_FEATURE_LFS)
@@ -61,18 +56,18 @@
     return orig(requirements=requirements, features=features, **kwargs)
 
 
-@eh.wrapfunction(changegroup, b'allsupportedversions')
+@eh.wrapfunction(changegroup, 'allsupportedversions')
 def allsupportedversions(orig, ui):
     versions = orig(ui)
     versions.add(b'03')
     return versions
 
 
-@eh.wrapfunction(wireprotov1server, b'_capabilities')
+@eh.wrapfunction(wireprotov1server, '_capabilities')
 def _capabilities(orig, repo, proto):
     '''Wrap server command to announce lfs server capability'''
     caps = orig(repo, proto)
-    if util.safehasattr(repo.svfs, b'lfslocalblobstore'):
+    if hasattr(repo.svfs, 'lfslocalblobstore'):
         # Advertise a slightly different capability when lfs is *required*, so
         # that the client knows it MUST load the extension.  If lfs is not
         # required on the server, there's no reason to autoload the extension
@@ -227,7 +222,7 @@
     return orig(self, rev)
 
 
-@eh.wrapfunction(revlog, b'_verify_revision')
+@eh.wrapfunction(revlog, '_verify_revision')
 def _verify_revision(orig, rl, skipflags, state, node):
     if _islfs(rl, node=node):
         rawtext = rl.rawdata(node)
@@ -246,7 +241,7 @@
     orig(rl, skipflags, state, node)
 
 
-@eh.wrapfunction(context.basefilectx, b'cmp')
+@eh.wrapfunction(context.basefilectx, 'cmp')
 def filectxcmp(orig, self, fctx):
     """returns True if text is different than fctx"""
     # some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
@@ -258,7 +253,7 @@
     return orig(self, fctx)
 
 
-@eh.wrapfunction(context.basefilectx, b'isbinary')
+@eh.wrapfunction(context.basefilectx, 'isbinary')
 def filectxisbinary(orig, self):
     if self.islfs():
         # fast path: use lfs metadata to answer isbinary
@@ -272,13 +267,13 @@
     return _islfs(self.filelog()._revlog, self.filenode())
 
 
-@eh.wrapfunction(cmdutil, b'_updatecatformatter')
+@eh.wrapfunction(cmdutil, '_updatecatformatter')
 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
     orig(fm, ctx, matcher, path, decode)
     fm.data(rawdata=ctx[path].rawdata())
 
 
-@eh.wrapfunction(scmutil, b'wrapconvertsink')
+@eh.wrapfunction(scmutil, 'wrapconvertsink')
 def convertsink(orig, sink):
     sink = orig(sink)
     if sink.repotype == b'hg':
@@ -325,7 +320,7 @@
 
 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
 # options and blob stores are passed from othervfs to the new readonlyvfs.
-@eh.wrapfunction(vfsmod.readonlyvfs, b'__init__')
+@eh.wrapfunction(vfsmod.readonlyvfs, '__init__')
 def vfsinit(orig, self, othervfs):
     orig(self, othervfs)
     # copy lfs related options
@@ -334,15 +329,15 @@
             self.options[k] = v
     # also copy lfs blobstores. note: this can run before reposetup, so lfs
     # blobstore attributes are not always ready at this time.
-    for name in [b'lfslocalblobstore', b'lfsremoteblobstore']:
-        if util.safehasattr(othervfs, name):
+    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
+        if hasattr(othervfs, name):
             setattr(self, name, getattr(othervfs, name))
 
 
 def _prefetchfiles(repo, revmatches):
     """Ensure that required LFS blobs are present, fetching them as a group if
     needed."""
-    if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
+    if not hasattr(repo.svfs, 'lfslocalblobstore'):
         return
 
     pointers = []
@@ -366,7 +361,7 @@
 
 def _canskipupload(repo):
     # Skip if this hasn't been passed to reposetup()
-    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
+    if not hasattr(repo.svfs, 'lfsremoteblobstore'):
         return True
 
     # if remotestore is a null store, upload is a no-op and can be skipped
@@ -375,7 +370,7 @@
 
 def candownload(repo):
     # Skip if this hasn't been passed to reposetup()
-    if not util.safehasattr(repo.svfs, b'lfsremoteblobstore'):
+    if not hasattr(repo.svfs, 'lfsremoteblobstore'):
         return False
 
     # if remotestore is a null store, downloads will lead to nothing
@@ -383,10 +378,7 @@
 
 
 def uploadblobsfromrevs(repo, revs):
-    """upload lfs blobs introduced by revs
-
-    Note: also used by other extensions e. g. infinitepush. avoid renaming.
-    """
+    """upload lfs blobs introduced by revs"""
     if _canskipupload(repo):
         return
     pointers = extractpointers(repo, revs)
@@ -403,7 +395,7 @@
     return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
 
 
-@eh.wrapfunction(exchange, b'push')
+@eh.wrapfunction(exchange, 'push')
 def push(orig, repo, remote, *args, **kwargs):
     """bail on push if the extension isn't enabled on remote when needed, and
     update the remote store based on the destination path."""
@@ -433,7 +425,7 @@
 
 
 # when writing a bundle via "hg bundle" command, upload related LFS blobs
-@eh.wrapfunction(bundle2, b'writenewbundle')
+@eh.wrapfunction(bundle2, 'writenewbundle')
 def writenewbundle(
     orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
 ):
@@ -522,14 +514,14 @@
     remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
 
 
-@eh.wrapfunction(upgrade_engine, b'finishdatamigration')
+@eh.wrapfunction(upgrade_engine, 'finishdatamigration')
 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
     orig(ui, srcrepo, dstrepo, requirements)
 
     # Skip if this hasn't been passed to reposetup()
-    if util.safehasattr(
-        srcrepo.svfs, b'lfslocalblobstore'
-    ) and util.safehasattr(dstrepo.svfs, b'lfslocalblobstore'):
+    if hasattr(srcrepo.svfs, 'lfslocalblobstore') and hasattr(
+        dstrepo.svfs, 'lfslocalblobstore'
+    ):
         srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
         dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
 
@@ -539,8 +531,8 @@
                 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
 
 
-@eh.wrapfunction(upgrade_actions, b'preservedrequirements')
-@eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
+@eh.wrapfunction(upgrade_actions, 'preservedrequirements')
+@eh.wrapfunction(upgrade_actions, 'supporteddestrequirements')
 def upgraderequirements(orig, repo):
     reqs = orig(repo)
     if b'lfs' in repo.requirements:
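
The vfsinit() hunk is a good illustration of why the names become native str once the builtins are used: the same string is fed to hasattr(), getattr() and setattr(). A self-contained model with stub classes (hypothetical names):

class VfsStub:
    pass

def copyblobstores(othervfs, newvfs):
    # Mirrors the hunk above; bytes names would raise TypeError here.
    for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
        if hasattr(othervfs, name):
            setattr(newvfs, name, getattr(othervfs, name))

src, dst = VfsStub(), VfsStub()
src.lfslocalblobstore = object()
copyblobstores(src, dst)
assert dst.lfslocalblobstore is src.lfslocalblobstore
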
--- a/hgext/mq.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/mq.py	Tue Nov 07 15:21:11 2023 +0100
@@ -75,8 +75,6 @@
     short,
 )
 from mercurial.pycompat import (
-    delattr,
-    getattr,
     open,
 )
 from mercurial import (
@@ -4186,7 +4184,7 @@
 
 
 def mqimport(orig, ui, repo, *args, **kwargs):
-    if util.safehasattr(repo, b'abortifwdirpatched') and not kwargs.get(
+    if hasattr(repo, 'abortifwdirpatched') and not kwargs.get(
         'no_commit', False
     ):
         repo.abortifwdirpatched(
--- a/hgext/narrow/narrowbundle2.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/narrow/narrowbundle2.py	Tue Nov 07 15:21:11 2023 +0100
@@ -259,7 +259,7 @@
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
-    if util.safehasattr(repo, 'setnewnarrowpats'):
+    if hasattr(repo, 'setnewnarrowpats'):
         op.gettransaction()
         repo.setnewnarrowpats()
 
@@ -333,9 +333,9 @@
 
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
-        if util.safehasattr(op, '_widen_bundle'):
+        if hasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
-        if util.safehasattr(op, '_bookmarksbackup'):
+        if hasattr(op, '_bookmarksbackup'):
             localrepo.localrepository._bookmarks.set(
                 op.repo, op._bookmarksbackup
             )
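
narrowbundle2 stashes transient state on the bundle-operation object (_widen_bundle, _bookmarksbackup) and probes for it afterwards with the builtin. A hedged, self-contained model of that pattern:

class BundleOp:
    pass

def handlepart(op, widen):
    if widen:
        op._widen_bundle = True  # marker for a later handler

def wrappedcghandler(op):
    # Probe for the stashed marker, as the hunk above does.
    if hasattr(op, '_widen_bundle'):
        print('widening requested')

op = BundleOp()
handlepart(op, widen=True)
wrappedcghandler(op)
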
--- a/hgext/narrow/narrowcommands.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/narrow/narrowcommands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -87,9 +87,8 @@
 
 def clonenarrowcmd(orig, ui, repo, *args, **opts):
     """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
-    opts = pycompat.byteskwargs(opts)
     wrappedextraprepare = util.nullcontextmanager()
-    narrowspecfile = opts[b'narrowspec']
+    narrowspecfile = opts['narrowspec']
 
     if narrowspecfile:
         filepath = os.path.join(encoding.getcwd(), narrowspecfile)
@@ -115,24 +114,25 @@
         narrowspec.validatepatterns(excludes)
 
         # narrowspec is passed so we should assume that user wants narrow clone
-        opts[b'narrow'] = True
-        opts[b'include'].extend(includes)
-        opts[b'exclude'].extend(excludes)
+        opts['narrow'] = True
+        opts['include'].extend(includes)
+        opts['exclude'].extend(excludes)
 
-    if opts[b'narrow']:
+    if opts['narrow']:
 
         def pullbundle2extraprepare_widen(orig, pullop, kwargs):
             orig(pullop, kwargs)
 
-            if opts.get(b'depth'):
-                kwargs[b'depth'] = opts[b'depth']
+            if opts.get('depth'):
+                # TODO: fix exchange._pullbundle2extraprepare()
+                kwargs[b'depth'] = opts['depth']
 
         wrappedextraprepare = extensions.wrappedfunction(
-            exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
+            exchange, '_pullbundle2extraprepare', pullbundle2extraprepare_widen
         )
 
     with wrappedextraprepare:
-        return orig(ui, repo, *args, **pycompat.strkwargs(opts))
+        return orig(ui, repo, *args, **opts)
 
 
 def pullnarrowcmd(orig, ui, repo, *args, **opts):
@@ -146,7 +146,7 @@
                 kwargs[b'depth'] = opts['depth']
 
         wrappedextraprepare = extensions.wrappedfunction(
-            exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
+            exchange, '_pullbundle2extraprepare', pullbundle2extraprepare_widen
         )
 
     with wrappedextraprepare:
@@ -201,7 +201,7 @@
 
 
 extensions.wrapfunction(
-    exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare
+    exchange, '_pullbundle2extraprepare', pullbundle2extraprepare
 )
 
 
@@ -366,7 +366,7 @@
         kwargs[b'excludepats'] = newexcludes
 
     wrappedextraprepare = extensions.wrappedfunction(
-        exchange, b'_pullbundle2extraprepare', pullbundle2extraprepare_widen
+        exchange, '_pullbundle2extraprepare', pullbundle2extraprepare_widen
     )
 
     # define a function that narrowbundle2 can call after creating the
@@ -511,7 +511,6 @@
     add --addinclude, --addexclude rules in bulk. Like the other include and
     exclude switches, the changes are applied immediately.
     """
-    opts = pycompat.byteskwargs(opts)
     if requirements.NARROW_REQUIREMENT not in repo.requirements:
         raise error.InputError(
             _(
@@ -522,14 +521,14 @@
 
     # Before supporting, decide whether it "hg tracked --clear" should mean
     # tracking no paths or all paths.
-    if opts[b'clear']:
+    if opts['clear']:
         raise error.InputError(_(b'the --clear option is not yet supported'))
 
     # import rules from a file
-    newrules = opts.get(b'import_rules')
+    newrules = opts.get('import_rules')
     if newrules:
+        filepath = os.path.join(encoding.getcwd(), newrules)
         try:
-            filepath = os.path.join(encoding.getcwd(), newrules)
             fdata = util.readfile(filepath)
         except IOError as inst:
             raise error.StorageError(
@@ -546,16 +545,16 @@
                     b"is not supported in narrowspec"
                 )
             )
-        opts[b'addinclude'].extend(includepats)
-        opts[b'addexclude'].extend(excludepats)
+        opts['addinclude'].extend(includepats)
+        opts['addexclude'].extend(excludepats)
 
-    addedincludes = narrowspec.parsepatterns(opts[b'addinclude'])
-    removedincludes = narrowspec.parsepatterns(opts[b'removeinclude'])
-    addedexcludes = narrowspec.parsepatterns(opts[b'addexclude'])
-    removedexcludes = narrowspec.parsepatterns(opts[b'removeexclude'])
-    autoremoveincludes = opts[b'auto_remove_includes']
+    addedincludes = narrowspec.parsepatterns(opts['addinclude'])
+    removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
+    addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
+    removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
+    autoremoveincludes = opts['auto_remove_includes']
 
-    update_working_copy = opts[b'update_working_copy']
+    update_working_copy = opts['update_working_copy']
     only_show = not (
         addedincludes
         or removedincludes
@@ -570,7 +569,7 @@
     if only_show:
         oldincludes, oldexcludes = repo.narrowpats
         ui.pager(b'tracked')
-        fm = ui.formatter(b'narrow', opts)
+        fm = ui.formatter(b'narrow', pycompat.byteskwargs(opts))
         for i in sorted(oldincludes):
             fm.startitem()
             fm.write(b'status', b'%s ', b'I', label=b'narrow.included')
@@ -614,7 +613,7 @@
         # also define the set of revisions to update for widening.
         path = urlutil.get_unique_pull_path_obj(b'tracked', ui, remotepath)
         ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
-        remote = hg.peer(repo, opts, path)
+        remote = hg.peer(repo, pycompat.byteskwargs(opts), path)
 
         try:
             # check narrow support before doing anything if widening needs to be
@@ -670,8 +669,8 @@
                     oldexcludes,
                     newincludes,
                     newexcludes,
-                    opts[b'force_delete_local_changes'],
-                    opts[b'backup'],
+                    opts['force_delete_local_changes'],
+                    opts['backup'],
                 )
                 # _narrow() updated the narrowspec and _widen() below needs to
                 # use the updated values as its base (otherwise removed includes
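
The --import-rules hunk above also shrinks the try block so that only util.readfile() sits inside it: os.path.join() cannot raise IOError, and a narrower try keeps the error report tied to the actual read. A hedged sketch of the resulting shape (plain open() standing in for util.readfile, RuntimeError for error.StorageError):

import os

def readrules(cwd, newrules):
    filepath = os.path.join(cwd, newrules)  # cannot raise IOError
    try:
        with open(filepath, 'rb') as f:
            return f.read()
    except IOError as inst:
        raise RuntimeError(
            'cannot read narrowspecs from %s: %s'
            % (filepath, inst.strerror)
        )
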
--- a/hgext/narrow/narrowwirepeer.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/narrow/narrowwirepeer.py	Tue Nov 07 15:21:11 2023 +0100
@@ -36,7 +36,7 @@
                 kwargs["excludepats"] = b','.join(exclude)
             return orig(cmd, *args, **kwargs)
 
-        extensions.wrapfunction(peer, b'_calltwowaystream', wrapped)
+        extensions.wrapfunction(peer, '_calltwowaystream', wrapped)
 
     hg.wirepeersetupfuncs.append(wirereposetup)
 
--- a/hgext/pager.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/pager.py	Tue Nov 07 15:21:11 2023 +0100
@@ -76,7 +76,7 @@
                 ui.disablepager()
         return orig(ui, options, cmd, cmdfunc)
 
-    extensions.wrapfunction(dispatch, b'_runcommand', pagecmd)
+    extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
 
 
 attended = [b'annotate', b'cat', b'diff', b'export', b'glog', b'log', b'qdiff']
--- a/hgext/phabricator.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/phabricator.py	Tue Nov 07 15:21:11 2023 +0100
@@ -71,7 +71,6 @@
 
 from mercurial.node import bin, short
 from mercurial.i18n import _
-from mercurial.pycompat import getattr
 from mercurial.thirdparty import attr
 from mercurial import (
     cmdutil,
--- a/hgext/releasenotes.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/releasenotes.py	Tue Nov 07 15:21:11 2023 +0100
@@ -24,7 +24,6 @@
     error,
     logcmdutil,
     minirst,
-    pycompat,
     registrar,
     util,
 )
@@ -665,17 +664,16 @@
     admonitions (if any).
     """
 
-    opts = pycompat.byteskwargs(opts)
     sections = releasenotessections(ui, repo)
 
-    cmdutil.check_incompatible_arguments(opts, b'list', [b'rev', b'check'])
+    cmdutil.check_incompatible_arguments(opts, 'list', ['rev', 'check'])
 
-    if opts.get(b'list'):
+    if opts.get('list'):
         return _getadmonitionlist(ui, sections)
 
-    rev = opts.get(b'rev')
+    rev = opts.get('rev')
     revs = logcmdutil.revrange(repo, [rev or b'not public()'])
-    if opts.get(b'check'):
+    if opts.get('check'):
         return checkadmonitions(ui, repo, sections.names(), revs)
 
     incoming = parsenotesfromrevisions(repo, sections.names(), revs)
--- a/hgext/relink.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/relink.py	Tue Nov 07 15:21:11 2023 +0100
@@ -60,9 +60,7 @@
     command is running. (Both repositories will be locked against
     writes.)
     """
-    if not util.safehasattr(util, b'samefile') or not util.safehasattr(
-        util, b'samedevice'
-    ):
+    if not hasattr(util, 'samefile') or not hasattr(util, 'samedevice'):
         raise error.Abort(_(b'hardlinks are not supported on this system'))
 
     if origin is None and b'default-relink' in ui.paths:
--- a/hgext/remotefilelog/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -317,31 +317,31 @@
     changegroup.cgpacker = shallowbundle.shallowcg1packer
 
     extensions.wrapfunction(
-        changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
+        changegroup, '_addchangegroupfiles', shallowbundle.addchangegroupfiles
     )
     extensions.wrapfunction(
-        changegroup, b'makechangegroup', shallowbundle.makechangegroup
+        changegroup, 'makechangegroup', shallowbundle.makechangegroup
     )
-    extensions.wrapfunction(localrepo, b'makestore', storewrapper)
-    extensions.wrapfunction(exchange, b'pull', exchangepull)
-    extensions.wrapfunction(merge, b'applyupdates', applyupdates)
-    extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
-    extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
-    extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
+    extensions.wrapfunction(localrepo, 'makestore', storewrapper)
+    extensions.wrapfunction(exchange, 'pull', exchangepull)
+    extensions.wrapfunction(merge, 'applyupdates', applyupdates)
+    extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
+    extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
+    extensions.wrapfunction(scmutil, '_findrenames', findrenames)
     extensions.wrapfunction(
-        copies, b'_computeforwardmissing', computeforwardmissing
+        copies, '_computeforwardmissing', computeforwardmissing
     )
-    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
-    extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
-    extensions.wrapfunction(context.changectx, b'filectx', filectx)
-    extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
-    extensions.wrapfunction(patch, b'trydiff', trydiff)
-    extensions.wrapfunction(hg, b'verify', _verify)
+    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
+    extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
+    extensions.wrapfunction(context.changectx, 'filectx', filectx)
+    extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
+    extensions.wrapfunction(patch, 'trydiff', trydiff)
+    extensions.wrapfunction(hg, 'verify', _verify)
     scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)
 
     # disappointing hacks below
-    extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
-    extensions.wrapfunction(revset, b'filelog', filelogrevset)
+    extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn)
+    extensions.wrapfunction(revset, 'filelog', filelogrevset)
     revset.symbols[b'filelog'] = revset.filelog
 
 
@@ -374,7 +374,7 @@
             else:
                 return orig(self, *args, **kwargs)
 
-        extensions.wrapfunction(exchange, b'pull', pull_shallow)
+        extensions.wrapfunction(exchange, 'pull', pull_shallow)
 
         # Wrap the stream logic to add requirements and to pass include/exclude
         # patterns around.
@@ -393,14 +393,14 @@
                 else:
                     return orig()
 
-            extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)
+            extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
 
         def stream_wrap(orig, op):
             setup_streamout(op.repo, op.remote)
             return orig(op)
 
         extensions.wrapfunction(
-            streamclone, b'maybeperformlegacystreamclone', stream_wrap
+            streamclone, 'maybeperformlegacystreamclone', stream_wrap
         )
 
         def canperformstreamclone(orig, pullop, bundle2=False):
@@ -417,7 +417,7 @@
             return supported, requirements
 
         extensions.wrapfunction(
-            streamclone, b'canperformstreamclone', canperformstreamclone
+            streamclone, 'canperformstreamclone', canperformstreamclone
         )
 
     try:
@@ -425,7 +425,7 @@
     finally:
         if opts.get('shallow'):
             for r in repos:
-                if util.safehasattr(r, b'fileservice'):
+                if hasattr(r, 'fileservice'):
                     r.fileservice.close()
 
 
@@ -721,7 +721,7 @@
             )
 
     extensions.wrapfunction(
-        remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
+        remotefilelog.remotefilelog, 'addrawrevision', addrawrevision
     )
 
     def changelogadd(orig, self, *args, **kwargs):
@@ -749,7 +749,7 @@
         del pendingfilecommits[:]
         return node
 
-    extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
+    extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
 
 
 def getrenamedfn(orig, repo, endrev=None):
@@ -904,7 +904,7 @@
         if not isenabled(repo):
             continue
 
-        if not util.safehasattr(repo, b'name'):
+        if not hasattr(repo, 'name'):
             ui.warn(
                 _(b"repo %s is a misconfigured remotefilelog repo\n") % path
             )
@@ -1034,7 +1034,7 @@
     bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
 
     def anon(unused_success):
-        if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
+        if hasattr(repo, 'ranprefetch') and repo.ranprefetch:
             return
         repo.ranprefetch = True
         repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
@@ -1080,10 +1080,10 @@
             source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
         )
 
-    if util.safehasattr(remote, b'_callstream'):
+    if hasattr(remote, '_callstream'):
         remote._localrepo = repo
-    elif util.safehasattr(remote, b'getbundle'):
-        extensions.wrapfunction(remote, b'getbundle', localgetbundle)
+    elif hasattr(remote, 'getbundle'):
+        extensions.wrapfunction(remote, 'getbundle', localgetbundle)
 
     return orig(repo, remote, *args, **kwargs)
 
--- a/hgext/remotefilelog/basepack.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/basepack.py	Tue Nov 07 15:21:11 2023 +0100
@@ -7,7 +7,6 @@
 
 from mercurial.i18n import _
 from mercurial.pycompat import (
-    getattr,
     open,
 )
 from mercurial.node import hex
--- a/hgext/remotefilelog/basestore.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/basestore.py	Tue Nov 07 15:21:11 2023 +0100
@@ -415,7 +415,7 @@
 
     def markforrefresh(self):
         for store in self.stores:
-            if util.safehasattr(store, b'markforrefresh'):
+            if hasattr(store, 'markforrefresh'):
                 store.markforrefresh()
 
     @staticmethod
--- a/hgext/remotefilelog/connectionpool.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/connectionpool.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
 from mercurial import (
     hg,
     sshpeer,
-    util,
 )
 
 _sshv1peer = sshpeer.sshv1peer
@@ -41,14 +40,14 @@
         if conn is None:
 
             peer = hg.peer(self._repo.ui, {}, path)
-            if util.safehasattr(peer, '_cleanup'):
+            if hasattr(peer, '_cleanup'):
 
                 class mypeer(peer.__class__):
                     def _cleanup(self, warn=None):
                         # close pipee first so peer.cleanup reading it won't
                         # deadlock, if there are other processes with pipeo
                         # open (i.e. us).
-                        if util.safehasattr(self, 'pipee'):
+                        if hasattr(self, 'pipee'):
                             self.pipee.close()
                         return super(mypeer, self)._cleanup()
 
@@ -83,5 +82,5 @@
             self.close()
 
     def close(self):
-        if util.safehasattr(self.peer, 'cleanup'):
+        if hasattr(self.peer, 'cleanup'):
             self.peer.cleanup()
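
connectionpool builds a one-off subclass of the peer's own class so _cleanup() can close the pipe before delegating, then rebinds the instance to it. A self-contained model of the pattern (stub Peer class, hypothetical names):

class Peer:
    def _cleanup(self, warn=None):
        print('base cleanup')

def adoptpeer(peer):
    if hasattr(peer, '_cleanup'):
        class mypeer(peer.__class__):
            def _cleanup(self, warn=None):
                # Close the pipe first, when present, to avoid the
                # deadlock described in the hunk above.
                if hasattr(self, 'pipee'):
                    self.pipee.close()
                return super()._cleanup(warn)

        peer.__class__ = mypeer
    return peer

adoptpeer(Peer())._cleanup()  # prints 'base cleanup'
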
--- a/hgext/remotefilelog/contentstore.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/contentstore.py	Tue Nov 07 15:21:11 2023 +0100
@@ -4,7 +4,6 @@
     hex,
     sha1nodeconstants,
 )
-from mercurial.pycompat import getattr
 from mercurial import (
     mdiff,
     revlog,
--- a/hgext/remotefilelog/fileserverclient.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/fileserverclient.py	Tue Nov 07 15:21:11 2023 +0100
@@ -92,7 +92,7 @@
                 not in self.capabilities()
             ):
                 return
-            if not util.safehasattr(self, '_localrepo'):
+            if not hasattr(self, '_localrepo'):
                 return
             if (
                 constants.SHALLOWREPO_REQUIREMENT
@@ -132,7 +132,7 @@
 
         def _callstream(self, command, **opts):
             supertype = super(remotefilepeer, self)
-            if not util.safehasattr(supertype, '_sendrequest'):
+            if not hasattr(supertype, '_sendrequest'):
                 self._updatecallstreamopts(command, pycompat.byteskwargs(opts))
             return super(remotefilepeer, self)._callstream(command, **opts)
 
@@ -641,9 +641,7 @@
             self._lfsprefetch(fileids)
 
     def _lfsprefetch(self, fileids):
-        if not _lfsmod or not util.safehasattr(
-            self.repo.svfs, b'lfslocalblobstore'
-        ):
+        if not _lfsmod or not hasattr(self.repo.svfs, 'lfslocalblobstore'):
             return
         if not _lfsmod.wrapper.candownload(self.repo):
             return
--- a/hgext/remotefilelog/remotefilelog.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/remotefilelog.py	Tue Nov 07 15:21:11 2023 +0100
@@ -44,7 +44,6 @@
 
 class remotefilelog:
 
-    _generaldelta = True
     _flagserrorclass = error.RevlogError
 
     def __init__(self, opener, path, repo):
--- a/hgext/remotefilelog/remotefilelogserver.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/remotefilelogserver.py	Tue Nov 07 15:21:11 2023 +0100
@@ -67,7 +67,7 @@
         )
 
     extensions.wrapfunction(
-        changegroup.cgpacker, b'generatefiles', generatefiles
+        changegroup.cgpacker, 'generatefiles', generatefiles
     )
 
 
@@ -207,7 +207,7 @@
             ):
                 yield x
 
-    extensions.wrapfunction(streamclone, b'_walkstreamfiles', _walkstreamfiles)
+    extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)
 
     # expose remotefilelog capabilities
     def _capabilities(orig, repo, proto):
@@ -222,18 +222,18 @@
             caps.append(b'x_rfl_getfile')
         return caps
 
-    extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
+    extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
 
     def _adjustlinkrev(orig, self, *args, **kwargs):
         # When generating file blobs, taking the real path is too slow on large
         # repos, so force it to just return the linkrev directly.
         repo = self._repo
-        if util.safehasattr(repo, b'forcelinkrev') and repo.forcelinkrev:
+        if hasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
             return self._filelog.linkrev(self._filelog.rev(self._filenode))
         return orig(self, *args, **kwargs)
 
     extensions.wrapfunction(
-        context.basefilectx, b'_adjustlinkrev', _adjustlinkrev
+        context.basefilectx, '_adjustlinkrev', _adjustlinkrev
     )
 
     def _iscmd(orig, cmd):
@@ -241,7 +241,7 @@
             return False
         return orig(cmd)
 
-    extensions.wrapfunction(wireprotoserver, b'iscmd', _iscmd)
+    extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd)
 
 
 def _loadfileblob(repo, cachepath, path, node):
--- a/hgext/remotefilelog/repack.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/repack.py	Tue Nov 07 15:21:11 2023 +0100
@@ -49,7 +49,7 @@
 
 def fullrepack(repo, options=None):
     """If ``packsonly`` is True, stores creating only loose objects are skipped."""
-    if util.safehasattr(repo, 'shareddatastores'):
+    if hasattr(repo, 'shareddatastores'):
         datasource = contentstore.unioncontentstore(*repo.shareddatastores)
         historysource = metadatastore.unionmetadatastore(
             *repo.sharedhistorystores, allowincomplete=True
@@ -67,7 +67,7 @@
             options=options,
         )
 
-    if util.safehasattr(repo.manifestlog, 'datastore'):
+    if hasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata
@@ -107,7 +107,7 @@
     """This repacks the repo by looking at the distribution of pack files in the
     repo and performing the most minimal repack to keep the repo in good shape.
     """
-    if util.safehasattr(repo, 'shareddatastores'):
+    if hasattr(repo, 'shareddatastores'):
         packpath = shallowutil.getcachepackpath(
             repo, constants.FILEPACK_CATEGORY
         )
@@ -120,7 +120,7 @@
             options=options,
         )
 
-    if util.safehasattr(repo.manifestlog, 'datastore'):
+    if hasattr(repo.manifestlog, 'datastore'):
         localdata, shareddata = _getmanifeststores(repo)
         lpackpath, ldstores, lhstores = localdata
         spackpath, sdstores, shstores = shareddata
@@ -895,7 +895,7 @@
 
 
 def repacklockvfs(repo):
-    if util.safehasattr(repo, 'name'):
+    if hasattr(repo, 'name'):
         # Lock in the shared cache so repacks across multiple copies of the same
         # repo are coordinated.
         sharedcachepath = shallowutil.getcachepackpath(
--- a/hgext/remotefilelog/shallowrepo.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotefilelog/shallowrepo.py	Tue Nov 07 15:21:11 2023 +0100
@@ -340,7 +340,7 @@
     repo.excludepattern = repo.ui.configlist(
         b"remotefilelog", b"excludepattern", None
     )
-    if not util.safehasattr(repo, 'connectionpool'):
+    if not hasattr(repo, 'connectionpool'):
         repo.connectionpool = connectionpool.connectionpool(repo)
 
     if repo.includepattern or repo.excludepattern:
--- a/hgext/remotenames.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/remotenames.py	Tue Nov 07 15:21:11 2023 +0100
@@ -255,7 +255,7 @@
 
 
 def extsetup(ui):
-    extensions.wrapfunction(bookmarks, b'_printbookmarks', wrapprintbookmarks)
+    extensions.wrapfunction(bookmarks, '_printbookmarks', wrapprintbookmarks)
 
 
 def reposetup(ui, repo):
--- a/hgext/schemes.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/schemes.py	Tue Nov 07 15:21:11 2023 +0100
@@ -159,7 +159,7 @@
         else:
             hg.repo_schemes[scheme] = ShortRepository(url, scheme, t)
 
-    extensions.wrapfunction(urlutil, b'hasdriveletter', hasdriveletter)
+    extensions.wrapfunction(urlutil, 'hasdriveletter', hasdriveletter)
 
 
 @command(b'debugexpandscheme', norepo=True)
--- a/hgext/share.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/share.py	Tue Nov 07 15:21:11 2023 +0100
@@ -162,9 +162,9 @@
 
 
 def extsetup(ui):
-    extensions.wrapfunction(bookmarks, b'_getbkfile', getbkfile)
-    extensions.wrapfunction(bookmarks.bmstore, b'_recordchange', recordchange)
-    extensions.wrapfunction(bookmarks.bmstore, b'_writerepo', writerepo)
+    extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
+    extensions.wrapfunction(bookmarks.bmstore, '_recordchange', recordchange)
+    extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
     extensions.wrapcommand(commands.table, b'clone', clone)
 
 
--- a/hgext/sparse.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/sparse.py	Tue Nov 07 15:21:11 2023 +0100
@@ -73,7 +73,6 @@
 
 
 from mercurial.i18n import _
-from mercurial.pycompat import setattr
 from mercurial import (
     cmdutil,
     commands,
@@ -146,7 +145,7 @@
             revs = revs.filter(ctxmatch)
         return revs
 
-    extensions.wrapfunction(logcmdutil, b'_initialrevs', _initialrevs)
+    extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs)
 
 
 def _clonesparsecmd(orig, ui, repo, *args, **opts):
@@ -170,7 +169,7 @@
             )
             return orig(ctx, *args, **kwargs)
 
-        extensions.wrapfunction(mergemod, b'update', clonesparse)
+        extensions.wrapfunction(mergemod, 'update', clonesparse)
     return orig(ui, repo, *args, **opts)
 
 
--- a/hgext/split.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/split.py	Tue Nov 07 15:21:11 2023 +0100
@@ -22,7 +22,6 @@
     error,
     hg,
     logcmdutil,
-    pycompat,
     registrar,
     revsetlang,
     rewriteutil,
@@ -65,10 +64,9 @@
     By default, rebase connected non-obsoleted descendants onto the new
     changeset. Use --no-rebase to avoid the rebase.
     """
-    opts = pycompat.byteskwargs(opts)
     revlist = []
-    if opts.get(b'rev'):
-        revlist.append(opts.get(b'rev'))
+    if opts.get('rev'):
+        revlist.append(opts.get('rev'))
     revlist.extend(revs)
     with repo.wlock(), repo.lock():
         tr = repo.transaction(b'split')
@@ -89,7 +87,7 @@
             if ctx.node() is None:
                 raise error.InputError(_(b'cannot split working directory'))
 
-            if opts.get(b'rebase'):
+            if opts.get('rebase'):
                 # Skip obsoleted descendants and their descendants so the rebase
                 # won't cause conflicts for sure.
                 descendants = list(repo.revs(b'(%d::) - (%d)', rev, rev))
@@ -116,7 +114,7 @@
             wnode = repo[b'.'].node()
             top = None
             try:
-                top = dosplit(ui, repo, tr, ctx, opts)
+                top = dosplit(ui, repo, tr, ctx, **opts)
             finally:
                 # top is None: split failed, need update --clean recovery.
                 # wnode == ctx.node(): wnode split, no need to update.
@@ -128,7 +126,7 @@
                 dorebase(ui, repo, torebase, top)
 
 
-def dosplit(ui, repo, tr, ctx, opts):
+def dosplit(ui, repo, tr, ctx, **opts):
     committed = []  # [ctx]
 
     # Set working parent to ctx.p1(), and keep working copy as ctx's content
@@ -166,13 +164,13 @@
             ) % short(ctx.node())
         opts.update(
             {
-                b'edit': True,
-                b'interactive': True,
-                b'message': header + ctx.description(),
+                'edit': True,
+                'interactive': True,
+                'message': header + ctx.description(),
             }
         )
         origctx = repo[b'.']
-        commands.commit(ui, repo, **pycompat.strkwargs(opts))
+        commands.commit(ui, repo, **opts)
         newctx = repo[b'.']
         # Ensure user didn't do a "no-op" split (such as deselecting
         # everything).
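
split.py now threads str-keyed options end to end (dosplit takes **opts), so the pycompat.strkwargs() round trip at the commands.commit() call disappears. A minimal sketch of the shape, with a stub commit():

def commit(ui, repo, **opts):
    return opts.get('message')

def dosplit(ui, repo, **opts):
    # str keys forward directly as keyword arguments.
    opts.update({'edit': True, 'interactive': True, 'message': 'part'})
    return commit(ui, repo, **opts)

assert dosplit(None, None) == 'part'
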
--- a/hgext/sqlitestore.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/sqlitestore.py	Tue Nov 07 15:21:11 2023 +0100
@@ -1330,11 +1330,11 @@
 def extsetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)
     extensions.wrapfunction(
-        localrepo, b'newreporequirements', newreporequirements
+        localrepo, 'newreporequirements', newreporequirements
     )
-    extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
-    extensions.wrapfunction(localrepo, b'makemain', makemain)
-    extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
+    extensions.wrapfunction(localrepo, 'makefilestorage', makefilestorage)
+    extensions.wrapfunction(localrepo, 'makemain', makemain)
+    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
 
 
 def reposetup(ui, repo):
--- a/hgext/uncommit.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/uncommit.py	Tue Nov 07 15:21:11 2023 +0100
@@ -152,7 +152,6 @@
     """
     cmdutil.check_note_size(opts)
     cmdutil.resolve_commit_options(ui, opts)
-    opts = pycompat.byteskwargs(opts)
 
     with repo.wlock(), repo.lock():
 
@@ -160,7 +159,7 @@
         m, a, r, d = st.modified, st.added, st.removed, st.deleted
         isdirtypath = any(set(m + a + r + d) & set(pats))
         allowdirtywcopy = opts[
-            b'allow_dirty_working_copy'
+            'allow_dirty_working_copy'
         ] or repo.ui.configbool(b'experimental', b'uncommitondirtywdir')
         if not allowdirtywcopy and (not pats or isdirtypath):
             cmdutil.bailifchanged(
@@ -172,7 +171,7 @@
         if len(old.parents()) > 1:
             raise error.InputError(_(b"cannot uncommit merge changeset"))
 
-        match = scmutil.match(old, pats, opts)
+        match = scmutil.match(old, pats, pycompat.byteskwargs(opts))
 
         # Check all explicitly given files; abort if there's a problem.
         if match.files():
@@ -203,14 +202,14 @@
                 )
 
         with repo.transaction(b'uncommit'):
-            if not (opts[b'message'] or opts[b'logfile']):
-                opts[b'message'] = old.description()
-            message = cmdutil.logmessage(ui, opts)
+            if not (opts['message'] or opts['logfile']):
+                opts['message'] = old.description()
+            message = cmdutil.logmessage(ui, pycompat.byteskwargs(opts))
 
             keepcommit = pats
             if not keepcommit:
-                if opts.get(b'keep') is not None:
-                    keepcommit = opts.get(b'keep')
+                if opts.get('keep') is not None:
+                    keepcommit = opts.get('keep')
                 else:
                     keepcommit = ui.configbool(
                         b'experimental', b'uncommit.keep'
@@ -221,8 +220,8 @@
                 match,
                 keepcommit,
                 message=message,
-                user=opts.get(b'user'),
-                date=opts.get(b'date'),
+                user=opts.get('user'),
+                date=opts.get('date'),
             )
             if newid is None:
                 ui.status(_(b"nothing to uncommit\n"))
--- a/hgext/win32mbcs.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/win32mbcs.py	Tue Nov 07 15:21:11 2023 +0100
@@ -49,7 +49,6 @@
 import sys
 
 from mercurial.i18n import _
-from mercurial.pycompat import getattr, setattr
 from mercurial import (
     encoding,
     error,
--- a/hgext/zeroconf/__init__.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/hgext/zeroconf/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -233,10 +233,10 @@
             server.close()
 
 
-extensions.wrapfunction(dispatch, b'_runcommand', cleanupafterdispatch)
+extensions.wrapfunction(dispatch, '_runcommand', cleanupafterdispatch)
 
-extensions.wrapfunction(uimod.ui, b'config', config)
-extensions.wrapfunction(uimod.ui, b'configitems', configitems)
-extensions.wrapfunction(uimod.ui, b'configsuboptions', configsuboptions)
-extensions.wrapfunction(hg, b'defaultdest', defaultdest)
-extensions.wrapfunction(servermod, b'create_server', zc_create_server)
+extensions.wrapfunction(uimod.ui, 'config', config)
+extensions.wrapfunction(uimod.ui, 'configitems', configitems)
+extensions.wrapfunction(uimod.ui, 'configsuboptions', configsuboptions)
+extensions.wrapfunction(hg, 'defaultdest', defaultdest)
+extensions.wrapfunction(servermod, 'create_server', zc_create_server)
--- a/i18n/ja.po	Mon Nov 06 15:38:27 2023 +0100
+++ b/i18n/ja.po	Tue Nov 07 15:21:11 2023 +0100
@@ -5875,13 +5875,6 @@
 msgstr "共有元情報を相対パスで保持 (実験的実装)"
 
 msgid ""
-"    [infinitepush]\n"
-"    # Server-side and client-side option. Pattern of the infinitepush "
-"bookmark\n"
-"    branchpattern = PATTERN"
-msgstr ""
-
-msgid ""
 "    # Server or client\n"
 "    server = False"
 msgstr ""
@@ -5973,12 +5966,6 @@
 msgstr ""
 
 msgid ""
-"    # Instructs infinitepush to forward all received bundle2 parts to the\n"
-"    # bundle for storage. Defaults to False.\n"
-"    storeallparts = True"
-msgstr ""
-
-msgid ""
 "    # routes each incoming push to the bundlestore. defaults to False\n"
 "    pushtobundlestore = True"
 msgstr ""
@@ -5991,24 +5978,10 @@
 "    bookmarks = True\n"
 msgstr ""
 
-msgid "please set infinitepush.sqlhost"
-msgstr ""
-
-msgid "please set infinitepush.reponame"
-msgstr ""
-
 #, fuzzy, python-format
 msgid "invalid log level %s"
 msgstr "不正なローカルアドレス: %s"
 
-#, fuzzy, python-format
-msgid "unknown infinitepush store type specified %s"
-msgstr "--type に未知のバンドル種別が指定されました"
-
-#, fuzzy, python-format
-msgid "unknown infinitepush index type specified %s"
-msgstr "--type に未知のバンドル種別が指定されました"
-
 #, fuzzy
 msgid "force push to go to bundle store (EXPERIMENTAL)"
 msgstr "表示対象リビジョン"
@@ -6019,10 +5992,6 @@
 msgid "see 'hg help config.paths'"
 msgstr "詳細は 'hg help config.paths' 参照"
 
-#, fuzzy
-msgid "infinitepush bookmark '{}' does not exist in path '{}'"
-msgstr "ブックマーク '%s' は存在しません"
-
 msgid "no changes found\n"
 msgstr "差分はありません\n"
 
--- a/i18n/pt_BR.po	Mon Nov 06 15:38:27 2023 +0100
+++ b/i18n/pt_BR.po	Tue Nov 07 15:21:11 2023 +0100
@@ -5940,12 +5940,6 @@
 msgstr ""
 
 msgid ""
-"    [infinitepush]\n"
-"    # Server-side and client-side option. Pattern of the infinitepush bookmark\n"
-"    branchpattern = PATTERN"
-msgstr ""
-
-msgid ""
 "    # Server or client\n"
 "    server = False"
 msgstr ""
@@ -6034,12 +6028,6 @@
 msgstr ""
 
 msgid ""
-"    # Instructs infinitepush to forward all received bundle2 parts to the\n"
-"    # bundle for storage. Defaults to False.\n"
-"    storeallparts = True"
-msgstr ""
-
-msgid ""
 "    # routes each incoming push to the bundlestore. defaults to False\n"
 "    pushtobundlestore = True"
 msgstr ""
@@ -6052,24 +6040,10 @@
 "    bookmarks = True\n"
 msgstr ""
 
-msgid "please set infinitepush.sqlhost"
-msgstr ""
-
-msgid "please set infinitepush.reponame"
-msgstr ""
-
 #, python-format
 msgid "invalid log level %s"
 msgstr ""
 
-#, python-format
-msgid "unknown infinitepush store type specified %s"
-msgstr ""
-
-#, python-format
-msgid "unknown infinitepush index type specified %s"
-msgstr ""
-
 msgid "force push to go to bundle store (EXPERIMENTAL)"
 msgstr ""
 
@@ -6079,9 +6053,6 @@
 msgid "see 'hg help config.paths'"
 msgstr "veja 'hg help config.paths'"
 
-msgid "infinitepush bookmark '{}' does not exist in path '{}'"
-msgstr ""
-
 msgid "no changes found\n"
 msgstr "nenhuma alteração encontrada\n"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/admin/verify.py	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,341 @@
+# admin/verify.py - better repository integrity checking for Mercurial
+#
+# Copyright 2023 Octobus <contact@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import collections
+import copy
+import functools
+
+from ..i18n import _
+from .. import error, pycompat, registrar, requirements
+from ..utils import stringutil
+
+
+verify_table = {}
+verify_alias_table = {}
+check = registrar.verify_check(verify_table, verify_alias_table)
+
+
+# Use this to declare options/aliases in the middle of the hierarchy.
+# Checks like these are not run themselves and cannot have a body.
+# For an example, see the `revlogs` check.
+def noop_func(*args, **kwargs):
+    return
+
+
+@check(b"working-copy.dirstate", alias=b"dirstate")
+def check_dirstate(ui, repo, **options):
+    ui.status(_(b"checking dirstate\n"))
+
+    parent1, parent2 = repo.dirstate.parents()
+    m1 = repo[parent1].manifest()
+    m2 = repo[parent2].manifest()
+    errors = 0
+
+    is_narrow = requirements.NARROW_REQUIREMENT in repo.requirements
+    narrow_matcher = repo.narrowmatch() if is_narrow else None
+
+    for err in repo.dirstate.verify(m1, m2, narrow_matcher):
+        ui.warn(err[0] % err[1:])
+        errors += 1
+
+    return errors
+
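
check_dirstate above doubles as the registration template: a check takes
(ui, repo, **options), reports via ui, and returns its error count. A
hypothetical extra check following the same shape (name and body invented for
illustration):

    @check(b"working-copy.example", alias=b"example")
    def check_example(ui, repo, **options):
        ui.status(_(b"checking nothing in particular\n"))
        return 0  # number of errors found
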
+
+# Tree of all checks and their associated function
+pyramid = {}
+
+
+def build_pyramid(table, full_pyramid):
+    """Create a pyramid of checks of the registered checks.
+    It is a name-based hierarchy that can be arbitrarily nested."""
+    for entry, func in sorted(table.items(), key=lambda x: x[0], reverse=True):
+        cursor = full_pyramid
+        levels = entry.split(b".")
+        for level in levels[:-1]:
+            current_node = cursor.setdefault(level, {})
+            cursor = current_node
+        if cursor.get(levels[-1]) is None:
+            cursor[levels[-1]] = (entry, func)
+        elif func is not noop_func:
+            m = b"intermediate checks need to use `verify.noop_func`"
+            raise error.ProgrammingError(m)
+
+
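
For illustration, the nested shape build_pyramid produces from a toy table
(f1/f2 stand for assumed check functions):

    table = {
        b"store.revlogs.changelog": f1,
        b"working-copy.dirstate": f2,
    }
    result = {}
    build_pyramid(table, result)
    # result == {
    #     b"store": {b"revlogs": {b"changelog": (b"store.revlogs.changelog", f1)}},
    #     b"working-copy": {b"dirstate": (b"working-copy.dirstate", f2)},
    # }
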
+def find_checks(name, table=None, alias_table=None, full_pyramid=None):
+    """Find all checks for a given name and returns a dict of
+    (qualified_check_name, check_function)
+
+    # Examples
+
+    Using a full qualified name:
+    "working-copy.dirstate" -> {
+        "working-copy.dirstate": CF,
+    }
+
+    Using a *prefix* of a qualified name:
+    "store.revlogs" -> {
+        "store.revlogs.changelog": CF,
+        "store.revlogs.manifestlog": CF,
+        "store.revlogs.filelog": CF,
+    }
+
+    Using a defined alias:
+    "revlogs" -> {
+        "store.revlogs.changelog": CF,
+        "store.revlogs.manifestlog": CF,
+        "store.revlogs.filelog": CF,
+    }
+
+    Using something that is none of the above will be an error.
+    """
+    if table is None:
+        table = verify_table
+    if alias_table is None:
+        alias_table = verify_alias_table
+
+    if name == b"full":
+        return table
+    checks = {}
+
+    # is it a full name?
+    check = table.get(name)
+
+    if check is None:
+        # is it an alias?
+        qualified_name = alias_table.get(name)
+        if qualified_name is not None:
+            name = qualified_name
+            check = table.get(name)
+        else:
+            split = name.split(b".", 1)
+            if len(split) == 2:
+                # split[0] can be an alias
+                qualified_name = alias_table.get(split[0])
+                if qualified_name is not None:
+                    name = b"%s.%s" % (qualified_name, split[1])
+                    check = table.get(name)
+    else:
+        qualified_name = name
+
+    # Maybe it's a subtree in the check hierarchy that does not
+    # have an explicit alias.
+    levels = name.split(b".")
+    if full_pyramid is not None:
+        if not full_pyramid:
+            build_pyramid(table, full_pyramid)
+
+        pyramid.clear()
+        pyramid.update(full_pyramid.items())
+    else:
+        build_pyramid(table, pyramid)
+
+    subtree = pyramid
+    # Find subtree
+    for level in levels:
+        subtree = subtree.get(level)
+        if subtree is None:
+            hint = error.getsimilar(list(alias_table) + list(table), name)
+            hint = error.similarity_hint(hint)
+
+            raise error.InputError(_(b"unknown check %s" % name), hint=hint)
+
+    # Get all checks in that subtree
+    if isinstance(subtree, dict):
+        stack = list(subtree.items())
+        while stack:
+            current_name, entry = stack.pop()
+            if isinstance(entry, dict):
+                stack.extend(entry.items())
+            else:
+                # (qualified_name, func)
+                checks[entry[0]] = entry[1]
+    else:
+        checks[name] = check
+
+    return checks
+
+
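
A concrete run of the alias case from the docstring, with toy tables (f1/f2
stand for assumed check functions):

    toy_table = {
        b"store.revlogs.changelog": f1,
        b"store.revlogs.manifestlog": f2,
    }
    toy_aliases = {b"revlogs": b"store.revlogs"}
    found = find_checks(
        b"revlogs",
        table=toy_table,
        alias_table=toy_aliases,
        full_pyramid={},
    )
    # found == {b"store.revlogs.changelog": f1,
    #           b"store.revlogs.manifestlog": f2}
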
+def pass_options(
+    ui,
+    checks,
+    options,
+    table=None,
+    alias_table=None,
+    full_pyramid=None,
+):
+    """Given a dict of checks (fully qualified name to function), and a list
+    of options as given by the user, pass each option down to the right check
+    function."""
+    ui.debug(b"passing options to check functions\n")
+    to_modify = collections.defaultdict(dict)
+
+    if not checks:
+        raise error.Error(_(b"`checks` required"))
+
+    for option in sorted(options):
+        split = option.split(b":")
+        hint = _(
+            b"syntax is 'check:option=value', "
+            b"eg. revlogs.changelog:copies=yes"
+        )
+        option_error = error.InputError(
+            _(b"invalid option '%s'") % option, hint=hint
+        )
+        if len(split) != 2:
+            raise option_error
+
+        check_name, option_value = split
+        if not option_value:
+            raise option_error
+
+        split = option_value.split(b"=")
+        if len(split) != 2:
+            raise option_error
+
+        option_name, value = split
+        if not value:
+            raise option_error
+
+        path = b"%s:%s" % (check_name, option_name)
+
+        matching_checks = find_checks(
+            check_name,
+            table=table,
+            alias_table=alias_table,
+            full_pyramid=full_pyramid,
+        )
+        for name in matching_checks:
+            check = checks.get(name)
+            if check is None:
+                msg = _(b"specified option '%s' for unselected check '%s'\n")
+                raise error.InputError(msg % (option_name, name))
+
+            assert hasattr(check, "func")  # help Pytype
+
+            if not hasattr(check.func, "options"):
+                raise error.InputError(
+                    _(b"check '%s' has no option '%s'") % (name, option_name)
+                )
+
+            try:
+                matching_option = next(
+                    (o for o in check.func.options if o[0] == option_name)
+                )
+            except StopIteration:
+                raise error.InputError(
+                    _(b"check '%s' has no option '%s'") % (name, option_name)
+                )
+
+            # transform the argument from cli string to the expected Python type
+            _name, typ, _docstring = matching_option
+
+            as_typed = None
+            if isinstance(typ, bool):
+                as_bool = stringutil.parsebool(value)
+                if as_bool is None:
+                    raise error.InputError(
+                        _(b"'%s' is not a boolean ('%s')") % (path, value)
+                    )
+                as_typed = as_bool
+            elif isinstance(typ, list):
+                as_list = stringutil.parselist(value)
+                if as_list is None:
+                    raise error.InputError(
+                        _(b"'%s' is not a list ('%s')") % (path, value)
+                    )
+                as_typed = as_list
+            else:
+                raise error.ProgrammingError(b"unsupported type %s", type(typ))
+
+            if option_name in to_modify[name]:
+                raise error.InputError(
+                    _(b"duplicated option '%s' for '%s'") % (option_name, name)
+                )
+            else:
+                assert as_typed is not None
+                to_modify[name][option_name] = as_typed
+
+    # Handle the case where a check is selected without any command line
+    # option; such a check will be given its default option values below
+    for name, f in checks.items():
+        if name not in to_modify:
+            to_modify[name] = {}
+
+    # Merge default options with command line options
+    for check_name, cmd_options in to_modify.items():
+        check = checks.get(check_name)
+        func = checks[check_name]
+        merged_options = {}
+        # help Pytype
+        assert check is not None
+        assert check.func is not None
+        assert hasattr(check.func, "options")
+
+        if check.func.options:
+            # copy the default value in case it's mutable (list, etc.)
+            merged_options = {
+                o[0]: copy.deepcopy(o[1]) for o in check.func.options
+            }
+            if cmd_options:
+                for k, v in cmd_options.items():
+                    merged_options[k] = v
+        options = pycompat.strkwargs(merged_options)
+        checks[check_name] = functools.partial(func, **options)
+        ui.debug(b"merged options for '%s': '%r'\n" % (check_name, options))
+
+    return checks
+
+
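
The boolean coercion above relies on stringutil.parsebool, which returns None
for unrecognised input; a few concrete data points:

    from mercurial.utils import stringutil

    assert stringutil.parsebool(b"yes") is True
    assert stringutil.parsebool(b"0") is False
    # anything unrecognised comes back as None and is rejected above
    assert stringutil.parsebool(b"maybe") is None
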
+def get_checks(
+    repo,
+    ui,
+    names=None,
+    options=None,
+    table=None,
+    alias_table=None,
+    full_pyramid=None,
+):
+    """Given a list of function names and optionally a list of
+    options, return matched checks with merged options (command line options
+    values take precedence on default ones)
+
+    It runs find checks, then resolve options and returns a dict of matched
+    functions with resolved options.
+    """
+    funcs = {}
+
+    if names is None:
+        names = []
+
+    if options is None:
+        options = []
+
+    # find checks
+    for name in names:
+        matched = find_checks(
+            name,
+            table=table,
+            alias_table=alias_table,
+            full_pyramid=full_pyramid,
+        )
+        matched_names = b", ".join(matched)
+        ui.debug(b"found checks '%s' for name '%s'\n" % (matched_names, name))
+        funcs.update(matched)
+
+    funcs = {n: functools.partial(f, ui, repo) for n, f in funcs.items()}
+
+    # resolve options
+    checks = pass_options(
+        ui,
+        funcs,
+        options,
+        table=table,
+        alias_table=alias_table,
+        full_pyramid=full_pyramid,
+    )
+
+    return checks
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/admin_commands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,49 @@
+# admin_commands.py - command processing for admin* commands
+#
+# Copyright 2022 Mercurial Developers
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from .i18n import _
+from .admin import verify
+from . import error, registrar, transaction
+
+
+table = {}
+command = registrar.command(table)
+
+
+@command(
+    b'admin::verify',
+    [
+        (b'c', b'check', [], _(b'add a check'), _(b'CHECK')),
+        (b'o', b'option', [], _(b'pass an option to a check'), _(b'OPTION')),
+    ],
+    helpcategory=command.CATEGORY_MAINTENANCE,
+)
+def admin_verify(ui, repo, **opts):
+    """verify the integrity of the repository
+
+    Alternative UI to `hg verify` with a lot more control over the
+    verification process and better error reporting.
+    """
+
+    if not repo.url().startswith(b'file:'):
+        raise error.Abort(_(b"cannot verify bundle or remote repos"))
+
+    if transaction.has_abandoned_transaction(repo):
+        ui.warn(_(b"abandoned transaction found - run hg recover\n"))
+
+    checks = opts.get("check", [])
+    options = opts.get("option", [])
+
+    funcs = verify.get_checks(repo, ui, names=checks, options=options)
+
+    ui.status(_(b"running %d checks\n") % len(funcs))
+    # Done in two steps so that execution is separated from the resolving step
+    for name, func in sorted(funcs.items(), key=lambda x: x[0]):
+        ui.status(_(b"running %s\n") % name)
+        errors = func()
+        if errors:
+            ui.warn(_(b"found %d errors\n") % len(errors))
--- a/mercurial/bookmarks.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/bookmarks.py	Tue Nov 07 15:21:11 2023 +0100
@@ -14,7 +14,6 @@
     hex,
     short,
 )
-from .pycompat import getattr
 from . import (
     encoding,
     error,
--- a/mercurial/bundle2.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/bundle2.py	Tue Nov 07 15:21:11 2023 +0100
@@ -980,7 +980,7 @@
 
     def close(self):
         """close underlying file"""
-        if util.safehasattr(self._fp, 'close'):
+        if hasattr(self._fp, 'close'):
             return self._fp.close()
 
 
@@ -1068,7 +1068,7 @@
 
         The new part has the very same content but no partid assigned yet.
         Parts with generated data cannot be copied."""
-        assert not util.safehasattr(self.data, 'next')
+        assert not hasattr(self.data, 'next')
         return self.__class__(
             self.type,
             self._mandatoryparams,
@@ -1137,9 +1137,7 @@
                 msg.append(b')')
             if not self.data:
                 msg.append(b' empty payload')
-            elif util.safehasattr(self.data, 'next') or util.safehasattr(
-                self.data, b'__next__'
-            ):
+            elif hasattr(self.data, 'next') or hasattr(self.data, '__next__'):
                 msg.append(b' streamed payload')
             else:
                 msg.append(b' %i bytes payload' % len(self.data))
@@ -1233,9 +1231,7 @@
         Exists to handle the different methods to provide data to a part."""
         # we only support fixed size data now.
         # This will be improved in the future.
-        if util.safehasattr(self.data, 'next') or util.safehasattr(
-            self.data, '__next__'
-        ):
+        if hasattr(self.data, 'next') or hasattr(self.data, '__next__'):
             buff = util.chunkbuffer(self.data)
             chunk = buff.read(preferedchunksize)
             while chunk:
@@ -1380,9 +1376,7 @@
 
     def __init__(self, ui, header, fp):
         super(unbundlepart, self).__init__(fp)
-        self._seekable = util.safehasattr(fp, 'seek') and util.safehasattr(
-            fp, 'tell'
-        )
+        self._seekable = hasattr(fp, 'seek') and hasattr(fp, 'tell')
         self.ui = ui
         # unbundle state attr
         self._headerdata = header
--- a/mercurial/bundlerepo.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/bundlerepo.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,6 +12,7 @@
 """
 
 
+import contextlib
 import os
 import shutil
 
@@ -108,7 +109,15 @@
             self.bundlerevs.add(n)
             n += 1
 
-    def _chunk(self, rev, df=None):
+    @contextlib.contextmanager
+    def reading(self):
+        if self.repotiprev < 0:
+            yield
+        else:
+            with super().reading() as x:
+                yield x
+
+    def _chunk(self, rev):
         # Warning: in case of bundle, the diff is against what we stored as
         # delta base, not against rev - 1
         # XXX: could use some caching
@@ -129,7 +138,7 @@
 
         return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
 
-    def _rawtext(self, node, rev, _df=None):
+    def _rawtext(self, node, rev):
         if rev is None:
             rev = self.rev(node)
         validated = False
@@ -138,8 +147,11 @@
         iterrev = rev
         # reconstruct the revision if it is from a changegroup
         while iterrev > self.repotiprev:
-            if self._revisioncache and self._revisioncache[1] == iterrev:
-                rawtext = self._revisioncache[2]
+            if (
+                self._inner._revisioncache
+                and self._inner._revisioncache[1] == iterrev
+            ):
+                rawtext = self._inner._revisioncache[2]
                 break
             chain.append(iterrev)
             iterrev = self.index[iterrev][3]
@@ -147,7 +159,8 @@
             rawtext = b''
         elif rawtext is None:
             r = super(bundlerevlog, self)._rawtext(
-                self.node(iterrev), iterrev, _df=_df
+                self.node(iterrev),
+                iterrev,
             )
             __, rawtext, validated = r
         if chain:
@@ -194,6 +207,8 @@
         dirlogstarts=None,
         dir=b'',
     ):
+        # XXX manifestrevlog is not actually a revlog, so mixing it with
+        # bundlerevlog is not a good idea.
         manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
         bundlerevlog.__init__(
             self,
@@ -245,7 +260,7 @@
 class bundlephasecache(phases.phasecache):
     def __init__(self, *args, **kwargs):
         super(bundlephasecache, self).__init__(*args, **kwargs)
-        if util.safehasattr(self, 'opener'):
+        if hasattr(self, 'opener'):
             self.opener = vfsmod.readonlyvfs(self.opener)
 
     def write(self):
--- a/mercurial/changegroup.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/changegroup.py	Tue Nov 07 15:21:11 2023 +0100
@@ -1043,7 +1043,7 @@
                         return i
                 # We failed to resolve a parent for this node, so
                 # we crash the changegroup construction.
-                if util.safehasattr(store, 'target'):
+                if hasattr(store, 'target'):
                     target = store.display_id
                 else:
                     # some revlog not actually a revlog
--- a/mercurial/changelog.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/changelog.py	Tue Nov 07 15:21:11 2023 +0100
@@ -91,102 +91,6 @@
     return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
 
 
-class appender:
-    """the changelog index must be updated last on disk, so we use this class
-    to delay writes to it"""
-
-    def __init__(self, vfs, name, mode, buf):
-        self.data = buf
-        fp = vfs(name, mode)
-        self.fp = fp
-        self.offset = fp.tell()
-        self.size = vfs.fstat(fp).st_size
-        self._end = self.size
-
-    def end(self):
-        return self._end
-
-    def tell(self):
-        return self.offset
-
-    def flush(self):
-        pass
-
-    @property
-    def closed(self):
-        return self.fp.closed
-
-    def close(self):
-        self.fp.close()
-
-    def seek(self, offset, whence=0):
-        '''virtual file offset spans real file and data'''
-        if whence == 0:
-            self.offset = offset
-        elif whence == 1:
-            self.offset += offset
-        elif whence == 2:
-            self.offset = self.end() + offset
-        if self.offset < self.size:
-            self.fp.seek(self.offset)
-
-    def read(self, count=-1):
-        '''only trick here is reads that span real file and data'''
-        ret = b""
-        if self.offset < self.size:
-            s = self.fp.read(count)
-            ret = s
-            self.offset += len(s)
-            if count > 0:
-                count -= len(s)
-        if count != 0:
-            doff = self.offset - self.size
-            self.data.insert(0, b"".join(self.data))
-            del self.data[1:]
-            s = self.data[0][doff : doff + count]
-            self.offset += len(s)
-            ret += s
-        return ret
-
-    def write(self, s):
-        self.data.append(bytes(s))
-        self.offset += len(s)
-        self._end += len(s)
-
-    def __enter__(self):
-        self.fp.__enter__()
-        return self
-
-    def __exit__(self, *args):
-        return self.fp.__exit__(*args)
-
-
-class _divertopener:
-    def __init__(self, opener, target):
-        self._opener = opener
-        self._target = target
-
-    def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
-        if name != self._target:
-            return self._opener(name, mode, **kwargs)
-        return self._opener(name + b".a", mode, **kwargs)
-
-    def __getattr__(self, attr):
-        return getattr(self._opener, attr)
-
-
-def _delayopener(opener, target, buf):
-    """build an opener that stores chunks in 'buf' instead of 'target'"""
-
-    def _delay(name, mode=b'r', checkambig=False, **kwargs):
-        if name != target:
-            return opener(name, mode, **kwargs)
-        assert not kwargs
-        return appender(opener, name, mode, buf)
-
-    return _delay
-
-
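
The replacement for all of the removed machinery lives behind self._inner
(delay(), write_pending(), finalize_pending(); see the delayupdate hunk
below). For readers tracking what is deleted: the core trick of appender was
a virtual file whose reads may span the on-disk prefix and the delayed
in-memory tail. Condensed to that one idea (positive count only, illustrative
rather than the replacement code):

    class tailreader:
        def __init__(self, fp, size, buf):
            # fp: real file, size: its length, buf: list of pending chunks
            self.fp, self.size, self.buf = fp, size, buf
            self.offset = 0

        def read(self, count):
            ret = b""
            if self.offset < self.size:
                # serve from the real file first
                ret = self.fp.read(count)
                self.offset += len(ret)
                count -= len(ret)
            if count > 0:
                # then spill into the buffered, not-yet-written tail
                doff = self.offset - self.size
                s = b"".join(self.buf)[doff : doff + count]
                self.offset += len(s)
                ret += s
            return ret
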
 @attr.s
 class _changelogrevision:
     # Extensions might modify _defaultextra, so let the constructor below pass
@@ -410,17 +314,14 @@
             # changelogs don't benefit from generaldelta.
 
             self._format_flags &= ~revlog.FLAG_GENERALDELTA
-            self._generaldelta = False
+            self.delta_config.general_delta = False
 
         # Delta chains for changelogs tend to be very small because entries
         # tend to be small and don't delta well with each other. So disable delta
         # chains.
         self._storedeltachains = False
 
-        self._realopener = opener
-        self._delayed = False
-        self._delaybuf = None
-        self._divert = False
+        self._v2_delayed = False
         self._filteredrevs = frozenset()
         self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')
@@ -437,83 +338,50 @@
         self._filteredrevs_hashcache = {}
 
     def _write_docket(self, tr):
-        if not self._delayed:
+        if not self._v2_delayed:
             super(changelog, self)._write_docket(tr)
 
     def delayupdate(self, tr):
         """delay visibility of index updates to other readers"""
-        if self._docket is None and not self._delayed:
-            if len(self) == 0:
-                self._divert = True
-                if self._realopener.exists(self._indexfile + b'.a'):
-                    self._realopener.unlink(self._indexfile + b'.a')
-                self.opener = _divertopener(self._realopener, self._indexfile)
-            else:
-                self._delaybuf = []
-                self.opener = _delayopener(
-                    self._realopener, self._indexfile, self._delaybuf
-                )
-            self._segmentfile.opener = self.opener
-            self._segmentfile_sidedata.opener = self.opener
-        self._delayed = True
+        assert not self._inner.is_open
+        if self._docket is not None:
+            self._v2_delayed = True
+        else:
+            new_index = self._inner.delay()
+            if new_index is not None:
+                self._indexfile = new_index
+                tr.registertmp(new_index)
         tr.addpending(b'cl-%i' % id(self), self._writepending)
         tr.addfinalize(b'cl-%i' % id(self), self._finalize)
 
     def _finalize(self, tr):
         """finalize index updates"""
-        self._delayed = False
-        self.opener = self._realopener
-        self._segmentfile.opener = self.opener
-        self._segmentfile_sidedata.opener = self.opener
-        # move redirected index data back into place
+        assert not self._inner.is_open
         if self._docket is not None:
-            self._write_docket(tr)
-        elif self._divert:
-            assert not self._delaybuf
-            tmpname = self._indexfile + b".a"
-            nfile = self.opener.open(tmpname)
-            nfile.close()
-            self.opener.rename(tmpname, self._indexfile, checkambig=True)
-        elif self._delaybuf:
-            fp = self.opener(self._indexfile, b'a', checkambig=True)
-            fp.write(b"".join(self._delaybuf))
-            fp.close()
-            self._delaybuf = None
-        self._divert = False
-        # split when we're done
-        self._enforceinlinesize(tr, side_write=False)
+            self._docket.write(tr)
+            self._v2_delayed = False
+        else:
+            new_index_file = self._inner.finalize_pending()
+            self._indexfile = new_index_file
+            # split when we're done
+            self._enforceinlinesize(tr, side_write=False)
 
     def _writepending(self, tr):
         """create a file containing the unfinalized state for
         pretxnchangegroup"""
+        assert not self._inner.is_open
         if self._docket:
-            return self._docket.write(tr, pending=True)
-        if self._delaybuf:
-            # make a temporary copy of the index
-            fp1 = self._realopener(self._indexfile)
-            pendingfilename = self._indexfile + b".a"
-            # register as a temp file to ensure cleanup on failure
-            tr.registertmp(pendingfilename)
-            # write existing data
-            fp2 = self._realopener(pendingfilename, b"w")
-            fp2.write(fp1.read())
-            # add pending data
-            fp2.write(b"".join(self._delaybuf))
-            fp2.close()
-            # switch modes so finalize can simply rename
-            self._delaybuf = None
-            self._divert = True
-            self.opener = _divertopener(self._realopener, self._indexfile)
-            self._segmentfile.opener = self.opener
-            self._segmentfile_sidedata.opener = self.opener
-
-        if self._divert:
-            return True
-
-        return False
+            any_pending = self._docket.write(tr, pending=True)
+            self._v2_delayed = False
+        else:
+            new_index, any_pending = self._inner.write_pending()
+            if new_index is not None:
+                self._indexfile = new_index
+                tr.registertmp(new_index)
+        return any_pending
 
     def _enforceinlinesize(self, tr, side_write=True):
-        if not self._delayed:
+        if not self.is_delaying:
             revlog.revlog._enforceinlinesize(self, tr, side_write=side_write)
 
     def read(self, nodeorrev):
--- a/mercurial/chgserver.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/chgserver.py	Tue Nov 07 15:21:11 2023 +0100
@@ -49,10 +49,6 @@
 import time
 
 from .i18n import _
-from .pycompat import (
-    getattr,
-    setattr,
-)
 from .node import hex
 
 from . import (
@@ -236,7 +232,7 @@
             # will behave differently (i.e. write to stdout).
             if (
                 out is not self.fout
-                or not util.safehasattr(self.fout, 'fileno')
+                or not hasattr(self.fout, 'fileno')
                 or self.fout.fileno() != procutil.stdout.fileno()
                 or self._finoutredirected
             ):
@@ -260,9 +256,9 @@
     from . import dispatch  # avoid cycle
 
     newui = srcui.__class__.load()
-    for a in [b'fin', b'fout', b'ferr', b'environ']:
+    for a in ['fin', 'fout', 'ferr', 'environ']:
         setattr(newui, a, getattr(srcui, a))
-    if util.safehasattr(srcui, '_csystem'):
+    if hasattr(srcui, '_csystem'):
         newui._csystem = srcui._csystem
 
     # command line args
@@ -348,9 +344,9 @@
 
 _iochannels = [
     # server.ch, ui.fp, mode
-    (b'cin', b'fin', 'rb'),
-    (b'cout', b'fout', 'wb'),
-    (b'cerr', b'ferr', 'wb'),
+    ('cin', 'fin', 'rb'),
+    ('cout', 'fout', 'wb'),
+    ('cerr', 'ferr', 'wb'),
 ]
 
 
@@ -603,7 +599,7 @@
         }
     )
 
-    if util.safehasattr(procutil, 'setprocname'):
+    if hasattr(procutil, 'setprocname'):
 
         def setprocname(self):
             """Change process title"""
--- a/mercurial/cmdutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/cmdutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -28,9 +28,7 @@
     short,
 )
 from .pycompat import (
-    getattr,
     open,
-    setattr,
 )
 from .thirdparty import attr
 
@@ -813,18 +811,17 @@
     # creating a dirnode object for the root of the repo
     rootobj = dirnode(b'')
     pstatus = (
-        b'modified',
-        b'added',
-        b'deleted',
-        b'clean',
-        b'unknown',
-        b'ignored',
-        b'removed',
+        ('modified', b'm'),
+        ('added', b'a'),
+        ('deleted', b'd'),
+        ('clean', b'c'),
+        ('unknown', b'u'),
+        ('ignored', b'i'),
+        ('removed', b'r'),
     )
 
     tersedict = {}
-    for attrname in pstatus:
-        statuschar = attrname[0:1]
+    for attrname, statuschar in pstatus:
         for f in getattr(statuslist, attrname):
             rootobj.addfile(f, statuschar)
         tersedict[statuschar] = []
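
The explicit (name, char) pairs replace deriving the status character from
the first byte of the attribute name; a two-line illustration of why that
derivation stops working once the names are native str:

    assert 'modified'[0:1] == 'm'    # str slice: no longer a bytes char
    assert b'modified'[0:1] == b'm'  # the old bytes-based derivation
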
@@ -1007,7 +1004,7 @@
     raise error.UnknownCommand(cmd, allcmds)
 
 
-def changebranch(ui, repo, revs, label, opts):
+def changebranch(ui, repo, revs, label, **opts):
     """Change the branch name of given revs to label"""
 
     with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
@@ -1026,7 +1023,7 @@
         root = repo[roots.first()]
         rpb = {parent.branch() for parent in root.parents()}
         if (
-            not opts.get(b'force')
+            not opts.get('force')
             and label not in rpb
             and label in repo.branchmap()
         ):
@@ -1450,7 +1447,7 @@
         if returnrevlog:
             if isinstance(r, revlog.revlog):
                 pass
-            elif util.safehasattr(r, '_revlog'):
+            elif hasattr(r, '_revlog'):
                 r = r._revlog  # pytype: disable=attribute-error
             elif r is not None:
                 raise error.InputError(
@@ -3329,9 +3326,7 @@
     return b"\n".join(edittext)
 
 
-def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
-    if opts is None:
-        opts = {}
+def commitstatus(repo, node, branch, bheads=None, tip=None, **opts):
     ctx = repo[node]
     parents = ctx.parents()
 
@@ -3341,7 +3336,7 @@
         # for most instances
         repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
     elif (
-        not opts.get(b'amend')
+        not opts.get('amend')
         and bheads
         and node not in bheads
         and not any(
@@ -3378,7 +3373,7 @@
         #
         # H H  n  head merge: head count decreases
 
-    if not opts.get(b'close_branch'):
+    if not opts.get('close_branch'):
         for r in parents:
             if r.closesbranch() and r.branch() == branch:
                 repo.ui.status(
--- a/mercurial/color.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/color.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
 import re
 
 from .i18n import _
-from .pycompat import getattr
 
 from . import (
     encoding,
--- a/mercurial/commands.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/commands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -18,8 +18,8 @@
     short,
     wdirrev,
 )
-from .pycompat import open
 from . import (
+    admin_commands as admin_commands_mod,
     archival,
     bookmarks,
     bundle2,
@@ -76,6 +76,7 @@
 
 table = {}
 table.update(debugcommandsmod.command._table)
+table.update(admin_commands_mod.command._table)
 
 command = registrar.command(table)
 INTENT_READONLY = registrar.INTENT_READONLY
@@ -646,8 +647,7 @@
     Returns 0 on success.
     """
 
-    opts = pycompat.byteskwargs(opts)
-    rev = opts.get(b'rev')
+    rev = opts.get('rev')
     if rev:
         repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = logcmdutil.revsingle(repo, rev)
@@ -660,8 +660,8 @@
     if os.path.realpath(dest) == repo.root:
         raise error.InputError(_(b'repository root cannot be destination'))
 
-    kind = opts.get(b'type') or archival.guesskind(dest) or b'files'
-    prefix = opts.get(b'prefix')
+    kind = opts.get('type') or archival.guesskind(dest) or b'files'
+    prefix = opts.get('prefix')
 
     if dest == b'-':
         if kind == b'files':
@@ -671,16 +671,16 @@
             prefix = os.path.basename(repo.root) + b'-%h'
 
     prefix = cmdutil.makefilename(ctx, prefix)
-    match = scmutil.match(ctx, [], opts)
+    match = scmutil.match(ctx, [], pycompat.byteskwargs(opts))
     archival.archive(
         repo,
         dest,
         node,
         kind,
-        not opts.get(b'no_decode'),
+        not opts.get('no_decode'),
         match,
         prefix,
-        subrepos=opts.get(b'subrepos'),
+        subrepos=opts.get('subrepos'),
     )
 
 
@@ -775,7 +775,6 @@
 
 def _dobackout(ui, repo, node=None, rev=None, **opts):
     cmdutil.check_incompatible_arguments(opts, 'no_commit', ['commit', 'merge'])
-    opts = pycompat.byteskwargs(opts)
 
     if rev and node:
         raise error.InputError(_(b"please specify just one revision"))
@@ -786,9 +785,9 @@
     if not rev:
         raise error.InputError(_(b"please specify a revision to backout"))
 
-    date = opts.get(b'date')
+    date = opts.get('date')
     if date:
-        opts[b'date'] = dateutil.parsedate(date)
+        opts['date'] = dateutil.parsedate(date)
 
     cmdutil.checkunfinished(repo)
     cmdutil.bailifchanged(repo)
@@ -805,16 +804,16 @@
     if p1 == repo.nullid:
         raise error.InputError(_(b'cannot backout a change with no parents'))
     if p2 != repo.nullid:
-        if not opts.get(b'parent'):
+        if not opts.get('parent'):
             raise error.InputError(_(b'cannot backout a merge changeset'))
-        p = repo.lookup(opts[b'parent'])
+        p = repo.lookup(opts['parent'])
         if p not in (p1, p2):
             raise error.InputError(
                 _(b'%s is not a parent of %s') % (short(p), short(node))
             )
         parent = p
     else:
-        if opts.get(b'parent'):
+        if opts.get('parent'):
             raise error.InputError(
                 _(b'cannot use --parent on non-merge changeset')
             )
@@ -824,9 +823,9 @@
     branch = repo.dirstate.branch()
     bheads = repo.branchheads(branch)
     rctx = scmutil.revsingle(repo, hex(parent))
-    if not opts.get(b'merge') and op1 != node:
+    if not opts.get('merge') and op1 != node:
         with repo.transaction(b"backout"):
-            overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+            overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
             with ui.configoverride(overrides, b'backout'):
                 stats = mergemod.back_out(ctx, parent=repo[parent])
             repo.setparents(op1, op2)
@@ -841,7 +840,7 @@
         repo.dirstate.setbranch(branch, repo.currenttransaction())
         cmdutil.revert(ui, repo, rctx)
 
-    if opts.get(b'no_commit'):
+    if opts.get('no_commit'):
         msg = _(b"changeset %s backed out, don't forget to commit.\n")
         ui.status(msg % short(node))
         return 0
@@ -862,7 +861,9 @@
     # save to detect changes
     tip = repo.changelog.tip()
 
-    newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
+    newnode = cmdutil.commit(
+        ui, repo, commitfunc, [], pycompat.byteskwargs(opts)
+    )
     if not newnode:
         ui.status(_(b"nothing changed\n"))
         return 1
@@ -875,10 +876,10 @@
         _(b'changeset %s backs out changeset %s\n')
         % (nice(newnode), nice(node))
     )
-    if opts.get(b'merge') and op1 != node:
+    if opts.get('merge') and op1 != node:
         hg.clean(repo, op1, show_stats=False)
         ui.status(_(b'merging with changeset %s\n') % nice(newnode))
-        overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+        overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
         with ui.configoverride(overrides, b'backout'):
             return hg.merge(repo[b'tip'])
     return 0
@@ -1239,56 +1240,55 @@
 
           hg book -ql .
     """
-    opts = pycompat.byteskwargs(opts)
-    force = opts.get(b'force')
-    rev = opts.get(b'rev')
-    inactive = opts.get(b'inactive')  # meaning add/rename to inactive bookmark
-
-    action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list')
+    force = opts.get('force')
+    rev = opts.get('rev')
+    inactive = opts.get('inactive')  # meaning add/rename to inactive bookmark
+
+    action = cmdutil.check_at_most_one_arg(opts, 'delete', 'rename', 'list')
     if action:
-        cmdutil.check_incompatible_arguments(opts, action, [b'rev'])
+        cmdutil.check_incompatible_arguments(opts, action, ['rev'])
     elif names or rev:
-        action = b'add'
+        action = 'add'
     elif inactive:
-        action = b'inactive'  # meaning deactivate
+        action = 'inactive'  # meaning deactivate
     else:
-        action = b'list'
-
-    cmdutil.check_incompatible_arguments(
-        opts, b'inactive', [b'delete', b'list']
-    )
-    if not names and action in {b'add', b'delete'}:
+        action = 'list'
+
+    cmdutil.check_incompatible_arguments(opts, 'inactive', ['delete', 'list'])
+    if not names and action in {'add', 'delete'}:
         raise error.InputError(_(b"bookmark name required"))
 
-    if action in {b'add', b'delete', b'rename', b'inactive'}:
+    if action in {'add', 'delete', 'rename', 'inactive'}:
         with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
-            if action == b'delete':
+            if action == 'delete':
                 names = pycompat.maplist(repo._bookmarks.expandname, names)
                 bookmarks.delete(repo, tr, names)
-            elif action == b'rename':
+            elif action == 'rename':
                 if not names:
                     raise error.InputError(_(b"new bookmark name required"))
                 elif len(names) > 1:
                     raise error.InputError(
                         _(b"only one new bookmark name allowed")
                     )
-                oldname = repo._bookmarks.expandname(opts[b'rename'])
+                oldname = repo._bookmarks.expandname(opts['rename'])
                 bookmarks.rename(repo, tr, oldname, names[0], force, inactive)
-            elif action == b'add':
+            elif action == 'add':
                 bookmarks.addbookmarks(repo, tr, names, rev, force, inactive)
-            elif action == b'inactive':
+            elif action == 'inactive':
                 if len(repo._bookmarks) == 0:
                     ui.status(_(b"no bookmarks set\n"))
                 elif not repo._activebookmark:
                     ui.status(_(b"no active bookmark\n"))
                 else:
                     bookmarks.deactivate(repo)
-    elif action == b'list':
+    elif action == 'list':
         names = pycompat.maplist(repo._bookmarks.expandname, names)
-        with ui.formatter(b'bookmarks', opts) as fm:
+        with ui.formatter(b'bookmarks', pycompat.byteskwargs(opts)) as fm:
             bookmarks.printbookmarks(ui, repo, fm, names)
     else:
-        raise error.ProgrammingError(b'invalid action: %s' % action)
+        raise error.ProgrammingError(
+            b'invalid action: %s' % pycompat.sysbytes(action)
+        )
 
 
 @command(
@@ -1340,12 +1340,11 @@
 
     Returns 0 on success.
     """
-    opts = pycompat.byteskwargs(opts)
-    revs = opts.get(b'rev')
+    revs = opts.get('rev')
     if label:
         label = label.strip()
 
-    if not opts.get(b'clean') and not label:
+    if not opts.get('clean') and not label:
         if revs:
             raise error.InputError(
                 _(b"no branch name specified for the revisions")
@@ -1354,7 +1353,7 @@
         return
 
     with repo.wlock():
-        if opts.get(b'clean'):
+        if opts.get('clean'):
             label = repo[b'.'].branch()
             repo.dirstate.setbranch(label, repo.currenttransaction())
             ui.status(_(b'reset working directory to branch %s\n') % label)
@@ -1362,9 +1361,9 @@
 
             scmutil.checknewlabel(repo, label, b'branch')
             if revs:
-                return cmdutil.changebranch(ui, repo, revs, label, opts)
-
-            if not opts.get(b'force') and label in repo.branchmap():
+                return cmdutil.changebranch(ui, repo, revs, label, **opts)
+
+            if not opts.get('force') and label in repo.branchmap():
                 if label not in [p.branch() for p in repo[None].parents()]:
                     raise error.InputError(
                         _(b'a branch of the same name already exists'),
@@ -1428,8 +1427,7 @@
     Returns 0.
     """
 
-    opts = pycompat.byteskwargs(opts)
-    revs = opts.get(b'rev')
+    revs = opts.get('rev')
     selectedbranches = None
     if revs:
         revs = logcmdutil.revrange(repo, revs)
@@ -1437,7 +1435,7 @@
         selectedbranches = {getbi(r)[0] for r in revs}
 
     ui.pager(b'branches')
-    fm = ui.formatter(b'branches', opts)
+    fm = ui.formatter(b'branches', pycompat.byteskwargs(opts))
     hexfunc = fm.hexfunc
 
     allheads = set(repo.heads())
@@ -1568,16 +1566,15 @@
 
     Returns 0 on success, 1 if no changes found.
     """
-    opts = pycompat.byteskwargs(opts)
 
     revs = None
-    if b'rev' in opts:
-        revstrings = opts[b'rev']
+    if 'rev' in opts:
+        revstrings = opts['rev']
         revs = logcmdutil.revrange(repo, revstrings)
         if revstrings and not revs:
             raise error.InputError(_(b'no commits to bundle'))
 
-    bundletype = opts.get(b'type', b'bzip2').lower()
+    bundletype = opts.get('type', b'bzip2').lower()
     try:
         bundlespec = bundlecaches.parsebundlespec(
             repo, bundletype, strict=False
@@ -1596,28 +1593,28 @@
             hint=_(b"use 'hg debugcreatestreamclonebundle'"),
         )
 
-    if opts.get(b'all'):
+    if opts.get('all'):
         if dests:
             raise error.InputError(
                 _(b"--all is incompatible with specifying destinations")
             )
-        if opts.get(b'base'):
+        if opts.get('base'):
             ui.warn(_(b"ignoring --base because --all was specified\n"))
-        if opts.get(b'exact'):
+        if opts.get('exact'):
             ui.warn(_(b"ignoring --exact because --all was specified\n"))
         base = [nullrev]
-    elif opts.get(b'exact'):
+    elif opts.get('exact'):
         if dests:
             raise error.InputError(
                 _(b"--exact is incompatible with specifying destinations")
             )
-        if opts.get(b'base'):
+        if opts.get('base'):
             ui.warn(_(b"ignoring --base because --exact was specified\n"))
         base = repo.revs(b'parents(%ld) - %ld', revs, revs)
         if not base:
             base = [nullrev]
     else:
-        base = logcmdutil.revrange(repo, opts.get(b'base'))
+        base = logcmdutil.revrange(repo, opts.get('base'))
     if cgversion not in changegroup.supportedoutgoingversions(repo):
         raise error.Abort(
             _(b"repository does not support bundle version %s") % cgversion
@@ -1638,7 +1635,7 @@
         missing = set()
         excluded = set()
         for path in urlutil.get_push_paths(repo, ui, dests):
-            other = hg.peer(repo, opts, path)
+            other = hg.peer(repo, pycompat.byteskwargs(opts), path)
             if revs is not None:
                 hex_revs = [repo[r].hex() for r in revs]
             else:
@@ -1656,7 +1653,7 @@
                 repo,
                 other,
                 onlyheads=heads,
-                force=opts.get(b'force'),
+                force=opts.get('force'),
                 portable=True,
             )
             missing.update(outgoing.missing)
@@ -1794,25 +1791,22 @@
 
     Returns 0 on success.
     """
-    opts = pycompat.byteskwargs(opts)
-    rev = opts.get(b'rev')
+    rev = opts.get('rev')
     if rev:
         repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = logcmdutil.revsingle(repo, rev)
-    m = scmutil.match(ctx, (file1,) + pats, opts)
-    fntemplate = opts.pop(b'output', b'')
+    m = scmutil.match(ctx, (file1,) + pats, pycompat.byteskwargs(opts))
+    fntemplate = opts.pop('output', b'')
     if cmdutil.isstdiofilename(fntemplate):
         fntemplate = b''
 
     if fntemplate:
-        fm = formatter.nullformatter(ui, b'cat', opts)
+        fm = formatter.nullformatter(ui, b'cat', pycompat.byteskwargs(opts))
     else:
         ui.pager(b'cat')
-        fm = ui.formatter(b'cat', opts)
+        fm = ui.formatter(b'cat', pycompat.byteskwargs(opts))
     with fm:
-        return cmdutil.cat(
-            ui, repo, ctx, m, fm, fntemplate, b'', **pycompat.strkwargs(opts)
-        )
+        return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, b'', **opts)
 
 
 @command(
@@ -1972,37 +1966,36 @@
 
     Returns 0 on success.
     """
-    opts = pycompat.byteskwargs(opts)
-    cmdutil.check_at_most_one_arg(opts, b'noupdate', b'updaterev')
+    cmdutil.check_at_most_one_arg(opts, 'noupdate', 'updaterev')
 
     # --include/--exclude can come from narrow or sparse.
     includepats, excludepats = None, None
 
     # hg.clone() differentiates between None and an empty set. So make sure
     # patterns are sets if narrow is requested without patterns.
-    if opts.get(b'narrow'):
+    if opts.get('narrow'):
         includepats = set()
         excludepats = set()
 
-        if opts.get(b'include'):
-            includepats = narrowspec.parsepatterns(opts.get(b'include'))
-        if opts.get(b'exclude'):
-            excludepats = narrowspec.parsepatterns(opts.get(b'exclude'))
+        if opts.get('include'):
+            includepats = narrowspec.parsepatterns(opts.get('include'))
+        if opts.get('exclude'):
+            excludepats = narrowspec.parsepatterns(opts.get('exclude'))
 
     r = hg.clone(
         ui,
-        opts,
+        pycompat.byteskwargs(opts),
         source,
         dest,
-        pull=opts.get(b'pull'),
-        stream=opts.get(b'stream') or opts.get(b'uncompressed'),
-        revs=opts.get(b'rev'),
-        update=opts.get(b'updaterev') or not opts.get(b'noupdate'),
-        branch=opts.get(b'branch'),
-        shareopts=opts.get(b'shareopts'),
+        pull=opts.get('pull'),
+        stream=opts.get('stream') or opts.get('uncompressed'),
+        revs=opts.get('rev'),
+        update=opts.get('updaterev') or not opts.get('noupdate'),
+        branch=opts.get('branch'),
+        shareopts=opts.get('shareopts'),
         storeincludepats=includepats,
         storeexcludepats=excludepats,
-        depth=opts.get(b'depth') or None,
+        depth=opts.get('depth') or None,
     )
 
     return r is None
@@ -2178,7 +2171,6 @@
             cmdutil.checkunfinished(repo)
 
         node = cmdutil.amend(ui, repo, old, extra, pats, opts)
-        opts = pycompat.byteskwargs(opts)
         if node == old.node():
             ui.status(_(b"nothing changed\n"))
             return 1
@@ -2209,11 +2201,14 @@
                         extra=extra,
                     )
 
-        opts = pycompat.byteskwargs(opts)
-        node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
+        node = cmdutil.commit(
+            ui, repo, commitfunc, pats, pycompat.byteskwargs(opts)
+        )
 
         if not node:
-            stat = cmdutil.postcommitstatus(repo, pats, opts)
+            stat = cmdutil.postcommitstatus(
+                repo, pats, pycompat.byteskwargs(opts)
+            )
             if stat.deleted:
                 ui.status(
                     _(
@@ -2226,7 +2221,7 @@
                 ui.status(_(b"nothing changed\n"))
             return 1
 
-    cmdutil.commitstatus(repo, node, branch, bheads, tip, opts)
+    cmdutil.commitstatus(repo, node, branch, bheads, tip, **opts)
 
     if not ui.quiet and ui.configbool(b'commands', b'commit.post-status'):
         status(
@@ -2237,7 +2232,7 @@
             removed=True,
             deleted=True,
             unknown=True,
-            subrepos=opts.get(b'subrepos'),
+            subrepos=opts.get('subrepos'),
         )
 
 
@@ -2319,19 +2314,18 @@
 
     """
 
-    opts = pycompat.byteskwargs(opts)
-    editopts = (b'edit', b'local', b'global', b'shared', b'non_shared')
+    editopts = ('edit', 'local', 'global', 'shared', 'non_shared')
     if any(opts.get(o) for o in editopts):
         cmdutil.check_at_most_one_arg(opts, *editopts[1:])
-        if opts.get(b'local'):
+        if opts.get('local'):
             if not repo:
                 raise error.InputError(
                     _(b"can't use --local outside a repository")
                 )
             paths = [repo.vfs.join(b'hgrc')]
-        elif opts.get(b'global'):
+        elif opts.get('global'):
             paths = rcutil.systemrcpath()
-        elif opts.get(b'shared'):
+        elif opts.get('shared'):
             if not repo.shared():
                 raise error.InputError(
                     _(b"repository is not shared; can't use --shared")
@@ -2344,7 +2338,7 @@
                     )
                 )
             paths = [vfsmod.vfs(repo.sharedpath).join(b'hgrc')]
-        elif opts.get(b'non_shared'):
+        elif opts.get('non_shared'):
             paths = [repo.vfs.join(b'hgrc-not-shared')]
         else:
             paths = rcutil.userrcpath()
@@ -2353,17 +2347,15 @@
             if os.path.exists(f):
                 break
         else:
-            if opts.get(b'global'):
+            if opts.get('global'):
                 samplehgrc = uimod.samplehgrcs[b'global']
-            elif opts.get(b'local'):
+            elif opts.get('local'):
                 samplehgrc = uimod.samplehgrcs[b'local']
             else:
                 samplehgrc = uimod.samplehgrcs[b'user']
 
             f = paths[0]
-            fp = open(f, b"wb")
-            fp.write(util.tonativeeol(samplehgrc))
-            fp.close()
+            util.writefile(f, util.tonativeeol(samplehgrc))
 
         editor = ui.geteditor()
         ui.system(
@@ -2374,7 +2366,7 @@
         )
         return
     ui.pager(b'config')
-    fm = ui.formatter(b'config', opts)
+    fm = ui.formatter(b'config', pycompat.byteskwargs(opts))
     for t, f in rcutil.rccomponents():
         if t == b'path':
             ui.debug(b'read config from: %s\n' % f)
@@ -2385,7 +2377,7 @@
             pass
         else:
             raise error.ProgrammingError(b'unknown rctype: %s' % t)
-    untrusted = bool(opts.get(b'untrusted'))
+    untrusted = bool(opts.get('untrusted'))
 
     selsections = selentries = []
     if values:
@@ -2396,8 +2388,8 @@
     selentries = set(selentries)
 
     matched = False
-    all_known = opts[b'exp_all_known']
-    show_source = ui.debugflag or opts.get(b'source')
+    all_known = opts['exp_all_known']
+    show_source = ui.debugflag or opts.get('source')
     entries = ui.walkconfig(untrusted=untrusted, all_known=all_known)
     for section, name, value in entries:
         source = ui.configsource(section, name, untrusted)
@@ -2506,11 +2498,10 @@
 
     Returns 0 on success, 1 if errors are encountered.
     """
-    opts = pycompat.byteskwargs(opts)
 
     context = lambda repo: repo.dirstate.changing_files(repo)
-    rev = opts.get(b'at_rev')
-    ctx = None
+    rev = opts.get('at_rev')
+
     if rev:
         ctx = logcmdutil.revsingle(repo, rev)
         if ctx.rev() is not None:
@@ -2518,9 +2509,9 @@
             def context(repo):
                 return util.nullcontextmanager()
 
-            opts[b'at_rev'] = ctx.rev()
+            opts['at_rev'] = ctx.rev()
     with repo.wlock(), context(repo):
-        return cmdutil.copy(ui, repo, pats, opts)
+        return cmdutil.copy(ui, repo, pats, pycompat.byteskwargs(opts))
 
 
 @command(
@@ -2984,13 +2975,12 @@
     Returns 0 on success.
     """
 
-    opts = pycompat.byteskwargs(opts)
     if not pats:
         raise error.InputError(_(b'no files specified'))
 
     with repo.wlock(), repo.dirstate.changing_files(repo):
-        m = scmutil.match(repo[None], pats, opts)
-        dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
+        m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
+        dryrun, interactive = opts.get('dry_run'), opts.get('interactive')
         uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
         rejected = cmdutil.forget(
             ui,
@@ -3755,19 +3745,18 @@
     Returns 0 if matching heads are found, 1 if not.
     """
 
-    opts = pycompat.byteskwargs(opts)
     start = None
-    rev = opts.get(b'rev')
+    rev = opts.get('rev')
     if rev:
         repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
         start = logcmdutil.revsingle(repo, rev, None).node()
 
-    if opts.get(b'topo'):
+    if opts.get('topo'):
         heads = [repo[h] for h in repo.heads(start)]
     else:
         heads = []
         for branch in repo.branchmap():
-            heads += repo.branchheads(branch, start, opts.get(b'closed'))
+            heads += repo.branchheads(branch, start, opts.get('closed'))
         heads = [repo[h] for h in heads]
 
     if branchrevs:
@@ -3776,7 +3765,7 @@
         }
         heads = [h for h in heads if h.branch() in branches]
 
-    if opts.get(b'active') and branchrevs:
+    if opts.get('active') and branchrevs:
         dagheads = repo.heads(start)
         heads = [h for h in heads if h.node() in dagheads]
 
@@ -3785,8 +3774,8 @@
         if branches - haveheads:
             headless = b', '.join(b for b in branches - haveheads)
             msg = _(b'no open branch heads found on branches %s')
-            if opts.get(b'rev'):
-                msg += _(b' (started at %s)') % opts[b'rev']
+            if opts.get('rev'):
+                msg += _(b' (started at %s)') % opts['rev']
             ui.warn((msg + b'\n') % headless)
 
     if not heads:
@@ -3794,7 +3783,9 @@
 
     ui.pager(b'heads')
     heads = sorted(heads, key=lambda x: -(x.rev()))
-    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+    displayer = logcmdutil.changesetdisplayer(
+        ui, repo, pycompat.byteskwargs(opts)
+    )
     for ctx in heads:
         displayer.show(ctx)
     displayer.close()
@@ -4221,20 +4212,20 @@
         opts, 'no_commit', ['bypass', 'secret']
     )
     cmdutil.check_incompatible_arguments(opts, 'exact', ['edit', 'prefix'])
-    opts = pycompat.byteskwargs(opts)
+
     if not patch1:
         raise error.InputError(_(b'need at least one patch to import'))
 
     patches = (patch1,) + patches
 
-    date = opts.get(b'date')
+    date = opts.get('date')
     if date:
-        opts[b'date'] = dateutil.parsedate(date)
-
-    exact = opts.get(b'exact')
-    update = not opts.get(b'bypass')
+        opts['date'] = dateutil.parsedate(date)
+
+    exact = opts.get('exact')
+    update = not opts.get('bypass')
     try:
-        sim = float(opts.get(b'similarity') or 0)
+        sim = float(opts.get('similarity') or 0)
     except ValueError:
         raise error.InputError(_(b'similarity must be a number'))
     if sim < 0 or sim > 100:
@@ -4242,17 +4233,17 @@
     if sim and not update:
         raise error.InputError(_(b'cannot use --similarity with --bypass'))
 
-    base = opts[b"base"]
+    base = opts["base"]
     msgs = []
     ret = 0
 
     with repo.wlock():
         if update:
             cmdutil.checkunfinished(repo)
-            if exact or not opts.get(b'force'):
+            if exact or not opts.get('force'):
                 cmdutil.bailifchanged(repo)
 
-        if not opts.get(b'no_commit'):
+        if not opts.get('no_commit'):
             lock = repo.lock
             tr = lambda: repo.transaction(b'import')
         else:
@@ -4274,7 +4265,13 @@
                 for hunk in patch.split(patchfile):
                     with patch.extract(ui, hunk) as patchdata:
                         msg, node, rej = cmdutil.tryimportone(
-                            ui, repo, patchdata, parents, opts, msgs, hg.clean
+                            ui,
+                            repo,
+                            patchdata,
+                            parents,
+                            pycompat.byteskwargs(opts),
+                            msgs,
+                            hg.clean,
                         )
                     if msg:
                         haspatch = True
@@ -4502,16 +4499,19 @@
 
     Returns 0 if a match is found, 1 otherwise.
     """
-    opts = pycompat.byteskwargs(opts)
-    if opts.get(b'print0'):
+    if opts.get('print0'):
         end = b'\0'
     else:
         end = b'\n'
-    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
+    ctx = logcmdutil.revsingle(repo, opts.get('rev'), None)
 
     ret = 1
     m = scmutil.match(
-        ctx, pats, opts, default=b'relglob', badfn=lambda x, y: False
+        ctx,
+        pats,
+        pycompat.byteskwargs(opts),
+        default=b'relglob',
+        badfn=lambda x, y: False,
     )
 
     ui.pager(b'locate')
@@ -4523,7 +4523,7 @@
         filesgen = ctx.matches(m)
     uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=bool(pats))
     for abs in filesgen:
-        if opts.get(b'fullpath'):
+        if opts.get('fullpath'):
             ui.write(repo.wjoin(abs), end)
         else:
             ui.write(uipathfn(abs), end)
@@ -4823,10 +4823,9 @@
 
     Returns 0 on success.
     """
-    opts = pycompat.byteskwargs(opts)
-    fm = ui.formatter(b'manifest', opts)
-
-    if opts.get(b'all'):
+    fm = ui.formatter(b'manifest', pycompat.byteskwargs(opts))
+
+    if opts.get('all'):
         if rev or node:
             raise error.InputError(_(b"can't specify a revision with --all"))
 
@@ -4917,11 +4916,10 @@
     Returns 0 on success, 1 if there are unresolved files.
     """
 
-    opts = pycompat.byteskwargs(opts)
-    abort = opts.get(b'abort')
+    abort = opts.get('abort')
     if abort and repo.dirstate.p2() == repo.nullid:
         cmdutil.wrongtooltocontinue(repo, _(b'merge'))
-    cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
+    cmdutil.check_incompatible_arguments(opts, 'abort', ['rev', 'preview'])
     if abort:
         state = cmdutil.getunfinishedstate(repo)
         if state and state._opname != b'merge':
@@ -4933,10 +4931,10 @@
             raise error.InputError(_(b"cannot specify a node with --abort"))
         return hg.abortmerge(repo.ui, repo)
 
-    if opts.get(b'rev') and node:
+    if opts.get('rev') and node:
         raise error.InputError(_(b"please specify just one revision"))
     if not node:
-        node = opts.get(b'rev')
+        node = opts.get('rev')
 
     if node:
         ctx = logcmdutil.revsingle(repo, node)
@@ -4955,22 +4953,24 @@
             _(b'merging with the working copy has no effect')
         )
 
-    if opts.get(b'preview'):
+    if opts.get('preview'):
         # find nodes that are ancestors of p2 but not of p1
         p1 = repo[b'.'].node()
         p2 = ctx.node()
         nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
 
-        displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
+        displayer = logcmdutil.changesetdisplayer(
+            ui, repo, pycompat.byteskwargs(opts)
+        )
         for node in nodes:
             displayer.show(repo[node])
         displayer.close()
         return 0
 
     # ui.forcemerge is an internal variable, do not document
-    overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+    overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
     with ui.configoverride(overrides, b'merge'):
-        force = opts.get(b'force')
+        force = opts.get('force')
         labels = [b'working copy', b'merge rev', b'common ancestor']
         return hg.merge(ctx, force=force, labels=labels)
 
@@ -5198,12 +5198,10 @@
     Returns 0 on success.
     """
 
-    opts = pycompat.byteskwargs(opts)
-
     pathitems = urlutil.list_paths(ui, search)
     ui.pager(b'paths')
 
-    fm = ui.formatter(b'paths', opts)
+    fm = ui.formatter(b'paths', pycompat.byteskwargs(opts))
     if fm.isplain():
         hidepassword = urlutil.hidepassword
     else:
@@ -5457,33 +5455,37 @@
     Returns 0 on success, 1 if an update had unresolved files.
     """
 
-    opts = pycompat.byteskwargs(opts)
-    if ui.configbool(b'commands', b'update.requiredest') and opts.get(
-        b'update'
-    ):
+    if ui.configbool(b'commands', b'update.requiredest') and opts.get('update'):
         msg = _(b'update destination required by configuration')
         hint = _(b'use hg pull followed by hg update DEST')
         raise error.InputError(msg, hint=hint)
 
+    update_conflict = None
+
     for path in urlutil.get_pull_paths(repo, ui, sources):
         ui.status(_(b'pulling from %s\n') % urlutil.hidepassword(path.loc))
         ui.flush()
-        other = hg.peer(repo, opts, path, remotehidden=opts[b'remote_hidden'])
+        other = hg.peer(
+            repo,
+            pycompat.byteskwargs(opts),
+            path,
+            remotehidden=opts['remote_hidden'],
+        )
         update_conflict = None
         try:
-            branches = (path.branch, opts.get(b'branch', []))
+            branches = (path.branch, opts.get('branch', []))
             revs, checkout = hg.addbranchrevs(
                 repo,
                 other,
                 branches,
-                opts.get(b'rev'),
-                remotehidden=opts[b'remote_hidden'],
+                opts.get('rev'),
+                remotehidden=opts['remote_hidden'],
             )
 
             pullopargs = {}
 
             nodes = None
-            if opts.get(b'bookmark') or revs:
+            if opts.get('bookmark') or revs:
                 # The list of bookmarks used here is the same one used to
                 # actually update the bookmark names, to avoid the race from
                 # issue 4689; we do all lookup and bookmark queries in one go
                 # so they see the same
@@ -5506,7 +5508,7 @@
                 remotebookmarks = fremotebookmarks.result()
                 remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
                 pullopargs[b'remotebookmarks'] = remotebookmarks
-                for b in opts.get(b'bookmark', []):
+                for b in opts.get('bookmark', []):
                     b = repo._bookmarks.expandname(b)
                     if b not in remotebookmarks:
                         raise error.InputError(
@@ -5520,19 +5522,19 @@
                         checkout = node
 
             wlock = util.nullcontextmanager()
-            if opts.get(b'update'):
+            if opts.get('update'):
                 wlock = repo.wlock()
             with wlock:
-                pullopargs.update(opts.get(b'opargs', {}))
+                pullopargs.update(opts.get('opargs', {}))
                 modheads = exchange.pull(
                     repo,
                     other,
                     path=path,
                     heads=nodes,
-                    force=opts.get(b'force'),
-                    bookmarks=opts.get(b'bookmark', ()),
+                    force=opts.get('force'),
+                    bookmarks=opts.get('bookmark', ()),
                     opargs=pullopargs,
-                    confirm=opts.get(b'confirm'),
+                    confirm=opts.get('confirm'),
                 ).cgresult
 
                 # brev is a name, which might be a bookmark to be activated at
@@ -5546,10 +5548,10 @@
                     # order below depends on implementation of
                     # hg.addbranchrevs(). opts['bookmark'] is ignored,
                     # because 'checkout' is determined without it.
-                    if opts.get(b'rev'):
-                        brev = opts[b'rev'][0]
-                    elif opts.get(b'branch'):
-                        brev = opts[b'branch'][0]
+                    if opts.get('rev'):
+                        brev = opts['rev'][0]
+                    elif opts.get('branch'):
+                        brev = opts['branch'][0]
                     else:
                         brev = path.branch
 
@@ -5559,7 +5561,7 @@
                 repo._subtoppath = path.loc
                 try:
                     update_conflict = postincoming(
-                        ui, repo, modheads, opts.get(b'update'), checkout, brev
+                        ui, repo, modheads, opts.get('update'), checkout, brev
                     )
                 except error.FilteredRepoLookupError as exc:
                     msg = _(b'cannot update to target: %s') % exc.args[0]
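
A small behavioral fix hides in these pull hunks: `update_conflict` is now also initialized before the loop over pull sources, so the name is bound even when `urlutil.get_pull_paths` yields nothing and the loop body never runs (the per-source reset inside the loop is kept). A stripped-down sketch of the failure mode this guards against, with invented names and assuming the value is read after the loop to compute the exit status:

    def pull_like(sources):
        update_conflict = None        # hoisted: bound even for empty sources
        for src in sources:
            update_conflict = None    # per-source reset, as in the hunk
            # ... pull from src; may set update_conflict to True ...
        return 1 if update_conflict else 0   # safe read after the loop

    pull_like([])   # returns 0 instead of raising UnboundLocalError
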
@@ -5633,24 +5635,23 @@
     list of files that this program would delete, use the --print
     option.
     """
-    opts = pycompat.byteskwargs(opts)
-    cmdutil.check_at_most_one_arg(opts, b'all', b'ignored')
-
-    act = not opts.get(b'print')
+    cmdutil.check_at_most_one_arg(opts, 'all', 'ignored')
+
+    act = not opts.get('print')
     eol = b'\n'
-    if opts.get(b'print0'):
+    if opts.get('print0'):
         eol = b'\0'
         act = False  # --print0 implies --print
-    if opts.get(b'all', False):
+    if opts.get('all', False):
         ignored = True
         unknown = True
     else:
-        ignored = opts.get(b'ignored', False)
+        ignored = opts.get('ignored', False)
         unknown = not ignored
 
-    removefiles = opts.get(b'files')
-    removedirs = opts.get(b'dirs')
-    confirm = opts.get(b'confirm')
+    removefiles = opts.get('files')
+    removedirs = opts.get('dirs')
+    confirm = opts.get('confirm')
     if confirm is None:
         try:
             extensions.find(b'purge')
@@ -5662,7 +5663,7 @@
         removefiles = True
         removedirs = True
 
-    match = scmutil.match(repo[None], dirs, opts)
+    match = scmutil.match(repo[None], dirs, pycompat.byteskwargs(opts))
 
     paths = mergemod.purge(
         repo,
@@ -5671,7 +5672,7 @@
         ignored=ignored,
         removeemptydirs=removedirs,
         removefiles=removefiles,
-        abortonerror=opts.get(b'abort_on_err'),
+        abortonerror=opts.get('abort_on_err'),
         noop=not act,
         confirm=confirm,
     )
@@ -5974,15 +5975,14 @@
     Returns 0 on success, 1 if any warnings encountered.
     """
 
-    opts = pycompat.byteskwargs(opts)
-    after, force = opts.get(b'after'), opts.get(b'force')
-    dryrun = opts.get(b'dry_run')
+    after, force = opts.get('after'), opts.get('force')
+    dryrun = opts.get('dry_run')
     if not pats and not after:
         raise error.InputError(_(b'no files specified'))
 
     with repo.wlock(), repo.dirstate.changing_files(repo):
-        m = scmutil.match(repo[None], pats, opts)
-        subrepos = opts.get(b'subrepos')
+        m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
+        subrepos = opts.get('subrepos')
         uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
         return cmdutil.remove(
             ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun
@@ -6034,10 +6034,9 @@
 
     Returns 0 on success, 1 if errors are encountered.
     """
-    opts = pycompat.byteskwargs(opts)
     context = lambda repo: repo.dirstate.changing_files(repo)
-    rev = opts.get(b'at_rev')
-    ctx = None
+    rev = opts.get('at_rev')
+
     if rev:
         ctx = logcmdutil.revsingle(repo, rev)
         if ctx.rev() is not None:
@@ -6045,9 +6044,11 @@
             def context(repo):
                 return util.nullcontextmanager()
 
-            opts[b'at_rev'] = ctx.rev()
+            opts['at_rev'] = ctx.rev()
     with repo.wlock(), context(repo):
-        return cmdutil.copy(ui, repo, pats, opts, rename=True)
+        return cmdutil.copy(
+            ui, repo, pats, pycompat.byteskwargs(opts), rename=True
+        )
 
 
 @command(
@@ -6398,30 +6399,29 @@
     Returns 0 on success.
     """
 
-    opts = pycompat.byteskwargs(opts)
-    if opts.get(b"date"):
-        cmdutil.check_incompatible_arguments(opts, b'date', [b'rev'])
-        opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
+    if opts.get("date"):
+        cmdutil.check_incompatible_arguments(opts, 'date', ['rev'])
+        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
 
     parent, p2 = repo.dirstate.parents()
-    if not opts.get(b'rev') and p2 != repo.nullid:
+    if not opts.get('rev') and p2 != repo.nullid:
         # revert after merge is a trap for new users (issue2915)
         raise error.InputError(
             _(b'uncommitted merge with no revision specified'),
             hint=_(b"use 'hg update' or see 'hg help revert'"),
         )
 
-    rev = opts.get(b'rev')
+    rev = opts.get('rev')
     if rev:
         repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
     ctx = logcmdutil.revsingle(repo, rev)
 
     if not (
         pats
-        or opts.get(b'include')
-        or opts.get(b'exclude')
-        or opts.get(b'all')
-        or opts.get(b'interactive')
+        or opts.get('include')
+        or opts.get('exclude')
+        or opts.get('all')
+        or opts.get('interactive')
     ):
         msg = _(b"no files or directories specified")
         if p2 != repo.nullid:
@@ -6455,7 +6455,7 @@
             hint = _(b"use --all to revert all files")
         raise error.InputError(msg, hint=hint)
 
-    return cmdutil.revert(ui, repo, ctx, *pats, **pycompat.strkwargs(opts))
+    return cmdutil.revert(ui, repo, ctx, *pats, **opts)
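
Here the `pycompat.strkwargs(opts)` wrapper disappears entirely rather than moving: `**`-expansion requires str keys, which `opts` now already has, and a bytes-keyed dict cannot be splatted into keyword arguments at all. A two-line demonstration (hypothetical `f`, shown only to make the constraint concrete):

    def f(**kwargs):
        return kwargs

    f(**{'rev': b'tip'})         # fine: str keys become keyword names
    try:
        f(**{b'rev': b'tip'})    # TypeError: keywords must be strings
    except TypeError as exc:
        print(exc)
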
 
 
 @command(
@@ -7117,7 +7117,6 @@
     Returns 0 on success.
     """
 
-    opts = pycompat.byteskwargs(opts)
     ui.pager(b'summary')
     ctx = repo[None]
     parents = ctx.parents()
@@ -7306,11 +7305,13 @@
 
     cmdutil.summaryhooks(ui, repo)
 
-    if opts.get(b'remote'):
+    if opts.get('remote'):
         needsincoming, needsoutgoing = True, True
     else:
         needsincoming, needsoutgoing = False, False
-        for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None):
+        for i, o in cmdutil.summaryremotehooks(
+            ui, repo, pycompat.byteskwargs(opts), None
+        ):
             if i:
                 needsincoming = True
             if o:
@@ -7327,7 +7328,7 @@
         try:
             other = hg.peer(repo, {}, path)
         except error.RepoError:
-            if opts.get(b'remote'):
+            if opts.get('remote'):
                 raise
             return path.loc, sbranch, None, None, None
         branches = (path.branch, [])
@@ -7366,7 +7367,7 @@
             try:
                 dother = hg.peer(repo, {}, path if path is not None else dest)
             except error.RepoError:
-                if opts.get(b'remote'):
+                if opts.get('remote'):
                     raise
                 return dest, dbranch, None, None
             ui.debug(b'comparing with %s\n' % urlutil.hidepassword(dest))
@@ -7392,7 +7393,7 @@
     else:
         dest = dbranch = dother = outgoing = None
 
-    if opts.get(b'remote'):
+    if opts.get('remote'):
         # Help pytype.  --remote sets both `needsincoming` and `needsoutgoing`.
         # The former always sets `sother` (or raises an exception if it can't);
         # the latter always sets `outgoing`.
@@ -7423,7 +7424,7 @@
     cmdutil.summaryremotehooks(
         ui,
         repo,
-        opts,
+        pycompat.byteskwargs(opts),
         (
             (source, sbranch, sother, commoninc),
             (dest, dbranch, dother, outgoing),
@@ -7479,7 +7480,7 @@
     Returns 0 on success.
     """
     cmdutil.check_incompatible_arguments(opts, 'remove', ['rev'])
-    opts = pycompat.byteskwargs(opts)
+
     with repo.wlock(), repo.lock():
         rev_ = b"."
         names = [t.strip() for t in (name1,) + names]
@@ -7491,11 +7492,11 @@
                 raise error.InputError(
                     _(b'tag names cannot consist entirely of whitespace')
                 )
-        if opts.get(b'rev'):
-            rev_ = opts[b'rev']
-        message = opts.get(b'message')
-        if opts.get(b'remove'):
-            if opts.get(b'local'):
+        if opts.get('rev'):
+            rev_ = opts['rev']
+        message = opts.get('message')
+        if opts.get('remove'):
+            if opts.get('local'):
                 expectedtype = b'local'
             else:
                 expectedtype = b'global'
@@ -7522,18 +7523,18 @@
             if not message:
                 # we don't translate commit messages
                 message = b'Removed tag %s' % b', '.join(names)
-        elif not opts.get(b'force'):
+        elif not opts.get('force'):
             for n in names:
                 if n in repo.tags():
                     raise error.InputError(
                         _(b"tag '%s' already exists (use -f to force)") % n
                     )
-        if not opts.get(b'local'):
+        if not opts.get('local'):
             p1, p2 = repo.dirstate.parents()
             if p2 != repo.nullid:
                 raise error.StateError(_(b'uncommitted merge'))
             bheads = repo.branchheads()
-            if not opts.get(b'force') and bheads and p1 not in bheads:
+            if not opts.get('force') and bheads and p1 not in bheads:
                 raise error.InputError(
                     _(
                         b'working directory is not at a branch head '
@@ -7545,7 +7546,7 @@
         # don't allow tagging the null rev or the working directory
         if node is None:
             raise error.InputError(_(b"cannot tag working directory"))
-        elif not opts.get(b'remove') and node == nullid:
+        elif not opts.get('remove') and node == nullid:
             raise error.InputError(_(b"cannot tag null revision"))
 
         if not message:
@@ -7555,25 +7556,23 @@
                 short(node),
             )
 
-        date = opts.get(b'date')
+        date = opts.get('date')
         if date:
             date = dateutil.parsedate(date)
 
-        if opts.get(b'remove'):
+        if opts.get('remove'):
             editform = b'tag.remove'
         else:
             editform = b'tag.add'
-        editor = cmdutil.getcommiteditor(
-            editform=editform, **pycompat.strkwargs(opts)
-        )
+        editor = cmdutil.getcommiteditor(editform=editform, **opts)
 
         tagsmod.tag(
             repo,
             names,
             node,
             message,
-            opts.get(b'local'),
-            opts.get(b'user'),
+            opts.get('local'),
+            opts.get('user'),
             date,
             editor=editor,
         )
@@ -7606,9 +7605,8 @@
     Returns 0 on success.
     """
 
-    opts = pycompat.byteskwargs(opts)
     ui.pager(b'tags')
-    fm = ui.formatter(b'tags', opts)
+    fm = ui.formatter(b'tags', pycompat.byteskwargs(opts))
     hexfunc = fm.hexfunc
 
     for t, n in reversed(repo.tagslist()):
@@ -7963,12 +7961,13 @@
     for more information about recovery from corruption of the
     repository.
 
+    For an alternative UI with a lot more control over the verification
+    process and better error reporting, try `hg help admin::verify`.
+
     Returns 0 on success, 1 if errors are encountered.
     """
-    opts = pycompat.byteskwargs(opts)
-
     level = None
-    if opts[b'full']:
+    if opts['full']:
         level = verifymod.VERIFY_FULL
     return hg.verify(repo, level)
 
@@ -7998,10 +7997,9 @@
       :bundled: Boolean. True if included in the release.
       :name:    String. Extension name.
     """
-    opts = pycompat.byteskwargs(opts)
     if ui.verbose:
         ui.pager(b'version')
-    fm = ui.formatter(b"version", opts)
+    fm = ui.formatter(b"version", pycompat.byteskwargs(opts))
     fm.startitem()
     fm.write(
         b"ver", _(b"Mercurial Distributed SCM (version %s)\n"), util.version()
--- a/mercurial/commandserver.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/commandserver.py	Tue Nov 07 15:21:11 2023 +0100
@@ -16,7 +16,6 @@
 import traceback
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -332,7 +331,7 @@
             # any kind of interaction must use server channels, but chg may
             # replace channels by fully functional tty files. so nontty is
             # enforced only if cin is a channel.
-            if not util.safehasattr(self.cin, 'fileno'):
+            if not hasattr(self.cin, 'fileno'):
                 ui.setconfig(b'ui', b'nontty', b'true', b'commandserver')
 
         req = dispatch.request(
@@ -384,7 +383,7 @@
         if self.cmsg:
             hellomsg += b'message-encoding: %s\n' % self.cmsg.encoding
         hellomsg += b'pid: %d' % procutil.getpid()
-        if util.safehasattr(os, 'getpgid'):
+        if hasattr(os, 'getpgid'):
             hellomsg += b'\n'
             hellomsg += b'pgid: %d' % os.getpgid(0)
 
@@ -559,7 +558,7 @@
         self.ui = ui
         self.repo = repo
         self.address = opts[b'address']
-        if not util.safehasattr(socket, 'AF_UNIX'):
+        if not hasattr(socket, 'AF_UNIX'):
             raise error.Abort(_(b'unsupported platform'))
         if not self.address:
             raise error.Abort(_(b'no socket path specified with --address'))
@@ -588,7 +587,7 @@
         o = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
         self._mainipc, self._workeripc = o
         self._servicehandler.bindsocket(self._sock, self.address)
-        if util.safehasattr(procutil, 'unblocksignal'):
+        if hasattr(procutil, 'unblocksignal'):
             procutil.unblocksignal(signal.SIGCHLD)
         o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
         self._oldsigchldhandler = o
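
These `util.safehasattr` to `hasattr` replacements (and the matching removal of the check-code rule earlier in this patch) rest on a Python 3 guarantee: the builtin `hasattr` only swallows `AttributeError`, whereas on Python 2 it swallowed every exception, which is what the safe wrapper worked around. A sketch, assuming `safehasattr` was essentially a `getattr` probe against a sentinel:

    _notset = object()

    def safehasattr(thing, attr):
        # sketch of the retired helper: only a genuinely missing attribute
        # (AttributeError inside getattr) yields False
        return getattr(thing, attr, _notset) is not _notset

    class Flaky:
        @property
        def fileno(self):
            raise ValueError('side effect, not a missing attribute')

    # On Python 3 both calls propagate the ValueError instead of silently
    # answering "no such attribute", so hasattr() is now equivalent.
    for probe in (safehasattr, hasattr):
        try:
            probe(Flaky(), 'fileno')
        except ValueError:
            pass
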
--- a/mercurial/config.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/config.py	Tue Nov 07 15:21:11 2023 +0100
@@ -10,7 +10,6 @@
 import os
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     encoding,
     error,
--- a/mercurial/configitems.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/configitems.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,11 +9,21 @@
 import functools
 import re
 
+from .utils import resourceutil
+
 from . import (
     encoding,
     error,
 )
 
+try:
+    import tomllib  # pytype: disable=import-error
+
+    tomllib.load  # trigger lazy import
+except ModuleNotFoundError:
+    # Python <3.11 compat
+    from .thirdparty import tomli as tomllib
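
The import dance above yields a single `tomllib` name on every supported interpreter: the standard-library module on Python 3.11+, the vendored `tomli` otherwise (the two expose the same API, which is why the alias works). One detail worth remembering when reading `read_configitems_file` below: `tomllib.load` only accepts a binary file object, hence the bytes-mode resource open. A minimal standalone sketch of the same pattern, using the pip-installable `tomli` instead of the vendored copy:

    try:
        import tomllib                 # stdlib on Python >= 3.11
    except ModuleNotFoundError:
        import tomli as tomllib        # backport with an identical API

    data = tomllib.loads('answer = 42\n')  # loads() takes str; load() takes
    assert data['answer'] == 42            # a *binary* file object
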
+
 
 def loadconfigtable(ui, extname, configtable):
     """update config item known to the ui with the extension ones"""
@@ -22,7 +32,7 @@
         knownkeys = set(knownitems)
         newkeys = set(items)
         for key in sorted(knownkeys & newkeys):
-            msg = b"extension '%s' overwrite config item '%s.%s'"
+            msg = b"extension '%s' overwrites config item '%s.%s'"
             msg %= (extname, section, key)
             ui.develwarn(msg, config=b'warn-config')
 
@@ -48,15 +58,19 @@
         generic=False,
         priority=0,
         experimental=False,
+        documentation="",
+        in_core_extension=None,
     ):
         self.section = section
         self.name = name
         self.default = default
+        self.documentation = documentation
         self.alias = list(alias)
         self.generic = generic
         self.priority = priority
         self.experimental = experimental
         self._re = None
+        self.in_core_extension = in_core_extension
         if generic:
             self._re = re.compile(self.name)
 
@@ -102,6 +116,74 @@
         return None
 
 
+def sanitize_item(item):
+    """Apply the transformations that are encoded on top of the pure data"""
+
+    # Set the special defaults
+    default_type_key = "default-type"
+    default_type = item.pop(default_type_key, None)
+    if default_type == "dynamic":
+        item["default"] = dynamicdefault
+    elif default_type == "list_type":
+        item["default"] = list
+    elif default_type == "lambda":
+        assert isinstance(item["default"], list)
+        default = [e.encode() for e in item["default"]]
+        item["default"] = lambda: default
+    elif default_type == "lazy_module":
+        item["default"] = lambda: encoding.encoding
+    else:
+        if default_type is not None:
+            msg = "invalid default config type %r for '%s.%s'"
+            msg %= (default_type, item["section"], item["name"])
+            raise error.ProgrammingError(msg)
+
+    # config expects bytes
+    alias = item.get("alias")
+    if alias:
+        item["alias"] = [(k.encode(), v.encode()) for (k, v) in alias]
+    if isinstance(item.get("default"), str):
+        item["default"] = item["default"].encode()
+    item["section"] = item["section"].encode()
+    item["name"] = item["name"].encode()
+
+
+def read_configitems_file():
+    """Returns the deserialized TOML structure from the configitems file"""
+    with resourceutil.open_resource(b"mercurial", b"configitems.toml") as fp:
+        return tomllib.load(fp)
+
+
+def configitems_from_toml(items):
+    """Register the configitems from the *deserialized* toml file"""
+    for item in items["items"]:
+        sanitize_item(item)
+        coreconfigitem(**item)
+
+    templates = items["templates"]
+
+    for application in items["template-applications"]:
+        template_items = templates[application["template"]]
+
+        for template_item in template_items:
+            item = template_item.copy()
+            prefix = application.get("prefix", "")
+            item["section"] = application["section"]
+            if prefix:
+                item["name"] = f'{prefix}.{item["suffix"]}'
+            else:
+                item["name"] = item["suffix"]
+
+            sanitize_item(item)
+            item.pop("suffix", None)
+            coreconfigitem(**item)
+
+
+def import_configitems_from_file():
+    as_toml = read_configitems_file()
+    configitems_from_toml(as_toml)
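
To make the loader concrete, here is a hypothetical fragment in the shape `configitems_from_toml` expects. The key names (`items`, `templates`, `template-applications`, `suffix`, `prefix`, and the `default-type` special defaults handled by `sanitize_item`) come from the code above; the sections and values themselves are invented for illustration:

    import tomllib  # or the fallback import shown at the top of the file

    example = tomllib.loads('''
    [[items]]
    section = "censor"
    name = "policy"
    default = "abort"
    experimental = true

    [[template-applications]]
    template = "diff-options"
    section = "annotate"

    [[templates.diff-options]]
    suffix = "nodates"
    default = false
    ''')

    # configitems_from_toml(example) would register censor.policy directly,
    # then expand the template into an annotate.nodates item; sanitize_item()
    # re-encodes the str fields to the bytes that coreconfigitem() expects.
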
+
+
 coreitems = {}
 
 
@@ -129,2856 +211,4 @@
 
 coreconfigitem = getitemregister(coreitems)
 
-
-def _registerdiffopts(section, configprefix=b''):
-    coreconfigitem(
-        section,
-        configprefix + b'nodates',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'showfunc',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'unified',
-        default=None,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'git',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'ignorews',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'ignorewsamount',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'ignoreblanklines',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'ignorewseol',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'nobinary',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'noprefix',
-        default=False,
-    )
-    coreconfigitem(
-        section,
-        configprefix + b'word-diff',
-        default=False,
-    )
-
-
-coreconfigitem(
-    b'alias',
-    b'.*',
-    default=dynamicdefault,
-    generic=True,
-)
-coreconfigitem(
-    b'auth',
-    b'cookiefile',
-    default=None,
-)
-_registerdiffopts(section=b'annotate')
-# bookmarks.pushing: internal hack for discovery
-coreconfigitem(
-    b'bookmarks',
-    b'pushing',
-    default=list,
-)
-# bundle.mainreporoot: internal hack for bundlerepo
-coreconfigitem(
-    b'bundle',
-    b'mainreporoot',
-    default=b'',
-)
-coreconfigitem(
-    b'censor',
-    b'policy',
-    default=b'abort',
-    experimental=True,
-)
-coreconfigitem(
-    b'chgserver',
-    b'idletimeout',
-    default=3600,
-)
-coreconfigitem(
-    b'chgserver',
-    b'skiphash',
-    default=False,
-)
-coreconfigitem(
-    b'cmdserver',
-    b'log',
-    default=None,
-)
-coreconfigitem(
-    b'cmdserver',
-    b'max-log-files',
-    default=7,
-)
-coreconfigitem(
-    b'cmdserver',
-    b'max-log-size',
-    default=b'1 MB',
-)
-coreconfigitem(
-    b'cmdserver',
-    b'max-repo-cache',
-    default=0,
-    experimental=True,
-)
-coreconfigitem(
-    b'cmdserver',
-    b'message-encodings',
-    default=list,
-)
-coreconfigitem(
-    b'cmdserver',
-    b'track-log',
-    default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
-)
-coreconfigitem(
-    b'cmdserver',
-    b'shutdown-on-interrupt',
-    default=True,
-)
-coreconfigitem(
-    b'color',
-    b'.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'color',
-    b'mode',
-    default=b'auto',
-)
-coreconfigitem(
-    b'color',
-    b'pagermode',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'command-templates',
-    b'graphnode',
-    default=None,
-    alias=[(b'ui', b'graphnodetemplate')],
-)
-coreconfigitem(
-    b'command-templates',
-    b'log',
-    default=None,
-    alias=[(b'ui', b'logtemplate')],
-)
-coreconfigitem(
-    b'command-templates',
-    b'mergemarker',
-    default=(
-        b'{node|short} '
-        b'{ifeq(tags, "tip", "", '
-        b'ifeq(tags, "", "", "{tags} "))}'
-        b'{if(bookmarks, "{bookmarks} ")}'
-        b'{ifeq(branch, "default", "", "{branch} ")}'
-        b'- {author|user}: {desc|firstline}'
-    ),
-    alias=[(b'ui', b'mergemarkertemplate')],
-)
-coreconfigitem(
-    b'command-templates',
-    b'pre-merge-tool-output',
-    default=None,
-    alias=[(b'ui', b'pre-merge-tool-output-template')],
-)
-coreconfigitem(
-    b'command-templates',
-    b'oneline-summary',
-    default=None,
-)
-coreconfigitem(
-    b'command-templates',
-    b'oneline-summary.*',
-    default=dynamicdefault,
-    generic=True,
-)
-_registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
-coreconfigitem(
-    b'commands',
-    b'commit.post-status',
-    default=False,
-)
-coreconfigitem(
-    b'commands',
-    b'grep.all-files',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'commands',
-    b'merge.require-rev',
-    default=False,
-)
-coreconfigitem(
-    b'commands',
-    b'push.require-revs',
-    default=False,
-)
-coreconfigitem(
-    b'commands',
-    b'resolve.confirm',
-    default=False,
-)
-coreconfigitem(
-    b'commands',
-    b'resolve.explicit-re-merge',
-    default=False,
-)
-coreconfigitem(
-    b'commands',
-    b'resolve.mark-check',
-    default=b'none',
-)
-_registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
-coreconfigitem(
-    b'commands',
-    b'show.aliasprefix',
-    default=list,
-)
-coreconfigitem(
-    b'commands',
-    b'status.relative',
-    default=False,
-)
-coreconfigitem(
-    b'commands',
-    b'status.skipstates',
-    default=[],
-    experimental=True,
-)
-coreconfigitem(
-    b'commands',
-    b'status.terse',
-    default=b'',
-)
-coreconfigitem(
-    b'commands',
-    b'status.verbose',
-    default=False,
-)
-coreconfigitem(
-    b'commands',
-    b'update.check',
-    default=None,
-)
-coreconfigitem(
-    b'commands',
-    b'update.requiredest',
-    default=False,
-)
-coreconfigitem(
-    b'committemplate',
-    b'.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'convert',
-    b'bzr.saverev',
-    default=True,
-)
-coreconfigitem(
-    b'convert',
-    b'cvsps.cache',
-    default=True,
-)
-coreconfigitem(
-    b'convert',
-    b'cvsps.fuzz',
-    default=60,
-)
-coreconfigitem(
-    b'convert',
-    b'cvsps.logencoding',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'cvsps.mergefrom',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'cvsps.mergeto',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'git.committeractions',
-    default=lambda: [b'messagedifferent'],
-)
-coreconfigitem(
-    b'convert',
-    b'git.extrakeys',
-    default=list,
-)
-coreconfigitem(
-    b'convert',
-    b'git.findcopiesharder',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'git.remoteprefix',
-    default=b'remote',
-)
-coreconfigitem(
-    b'convert',
-    b'git.renamelimit',
-    default=400,
-)
-coreconfigitem(
-    b'convert',
-    b'git.saverev',
-    default=True,
-)
-coreconfigitem(
-    b'convert',
-    b'git.similarity',
-    default=50,
-)
-coreconfigitem(
-    b'convert',
-    b'git.skipsubmodules',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.clonebranches',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.ignoreerrors',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.preserve-hash',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.revs',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.saverev',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.sourcename',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.startrev',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'hg.tagsbranch',
-    default=b'default',
-)
-coreconfigitem(
-    b'convert',
-    b'hg.usebranchnames',
-    default=True,
-)
-coreconfigitem(
-    b'convert',
-    b'ignoreancestorcheck',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'convert',
-    b'localtimezone',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'p4.encoding',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'convert',
-    b'p4.startrev',
-    default=0,
-)
-coreconfigitem(
-    b'convert',
-    b'skiptags',
-    default=False,
-)
-coreconfigitem(
-    b'convert',
-    b'svn.debugsvnlog',
-    default=True,
-)
-coreconfigitem(
-    b'convert',
-    b'svn.trunk',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'svn.tags',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'svn.branches',
-    default=None,
-)
-coreconfigitem(
-    b'convert',
-    b'svn.startrev',
-    default=0,
-)
-coreconfigitem(
-    b'convert',
-    b'svn.dangerous-set-commit-dates',
-    default=False,
-)
-coreconfigitem(
-    b'debug',
-    b'dirstate.delaywrite',
-    default=0,
-)
-coreconfigitem(
-    b'debug',
-    b'revlog.verifyposition.changelog',
-    default=b'',
-)
-coreconfigitem(
-    b'debug',
-    b'revlog.debug-delta',
-    default=False,
-)
-# display extra information about the bundling process
-coreconfigitem(
-    b'debug',
-    b'bundling-stats',
-    default=False,
-)
-# display extra information about the unbundling process
-coreconfigitem(
-    b'debug',
-    b'unbundling-stats',
-    default=False,
-)
-coreconfigitem(
-    b'defaults',
-    b'.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'devel',
-    b'all-warnings',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'bundle2.debug',
-    default=False,
-)
-# which kind of delta to put in the bundled changegroup. Possible values:
-# - '': use default behavior
-# - p1: force to always use delta against p1
-# - full: force to always use full content
-coreconfigitem(
-    b'devel',
-    b'bundle.delta',
-    default=b'',
-)
-coreconfigitem(
-    b'devel',
-    b'cache-vfs',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'check-locks',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'check-relroot',
-    default=False,
-)
-# Track copy information for all files, not just "added" ones (very slow)
-coreconfigitem(
-    b'devel',
-    b'copy-tracing.trace-all-files',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'default-date',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'deprec-warn',
-    default=False,
-)
-# possible values:
-# - auto (the default)
-# - force-append
-# - force-new
-coreconfigitem(
-    b'devel',
-    b'dirstate.v2.data_update_mode',
-    default="auto",
-)
-coreconfigitem(
-    b'devel',
-    b'disableloaddefaultcerts',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'warn-empty-changegroup',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'legacy.exchange',
-    default=list,
-)
-# When True, revlogs use a special reference version of the nodemap, which is
-# not performant but is "known" to behave properly.
-coreconfigitem(
-    b'devel',
-    b'persistent-nodemap',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'servercafile',
-    default=b'',
-)
-# This config option is intended for use in tests only. It is a giant
-# footgun to kill security. Don't define it.
-coreconfigitem(
-    b'devel',
-    b'server-insecure-exact-protocol',
-    default=b'',
-)
-coreconfigitem(
-    b'devel',
-    b'serverrequirecert',
-    default=False,
-)
-# Makes the status algorithm wait for the existence of this file
-# (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout`
-# seconds) before taking the lock and writing the dirstate.
-# Status signals that it's ready to wait by creating a file
-# with the same name + `.waiting`.
-# Useful when testing race conditions.
-coreconfigitem(
-    b'devel',
-    b'sync.status.pre-dirstate-write-file',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'sync.status.pre-dirstate-write-file-timeout',
-    default=2,
-)
-coreconfigitem(
-    b'devel',
-    b'sync.dirstate.post-docket-read-file',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'sync.dirstate.post-docket-read-file-timeout',
-    default=2,
-)
-coreconfigitem(
-    b'devel',
-    b'sync.dirstate.pre-read-file',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'sync.dirstate.pre-read-file-timeout',
-    default=2,
-)
-coreconfigitem(
-    b'devel',
-    b'strip-obsmarkers',
-    default=True,
-)
-coreconfigitem(
-    b'devel',
-    b'warn-config',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'warn-config-default',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'user.obsmarker',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'warn-config-unknown',
-    default=None,
-)
-coreconfigitem(
-    b'devel',
-    b'debug.copies',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'copy-tracing.multi-thread',
-    default=True,
-)
-coreconfigitem(
-    b'devel',
-    b'debug.extensions',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'debug.repo-filters',
-    default=False,
-)
-coreconfigitem(
-    b'devel',
-    b'debug.peer-request',
-    default=False,
-)
-# If discovery.exchange-heads is False, the discovery will not start with
-# remote head fetching and local head querying.
-coreconfigitem(
-    b'devel',
-    b'discovery.exchange-heads',
-    default=True,
-)
-# If devel.debug.abort-update is True, then any merge with the working copy,
-# e.g. [hg update], will be aborted after figuring out what needs to be done,
-# but before spawning the parallel worker
-coreconfigitem(
-    b'devel',
-    b'debug.abort-update',
-    default=False,
-)
-# If discovery.grow-sample is False, the sample size used in set discovery
-# will not be increased throughout the process
-coreconfigitem(
-    b'devel',
-    b'discovery.grow-sample',
-    default=True,
-)
-# When discovery.grow-sample.dynamic is True (the default), the sample size is
-# adapted to the shape of the undecided set (it is set to the max of:
-# <target-size>, len(roots(undecided)), len(heads(undecided)))
-coreconfigitem(
-    b'devel',
-    b'discovery.grow-sample.dynamic',
-    default=True,
-)
-# discovery.grow-sample.rate controls the rate at which the sample grows
-coreconfigitem(
-    b'devel',
-    b'discovery.grow-sample.rate',
-    default=1.05,
-)
-# If discovery.randomize is False, random sampling during discovery is
-# deterministic. It is meant for integration tests.
-coreconfigitem(
-    b'devel',
-    b'discovery.randomize',
-    default=True,
-)
-# Control the initial size of the discovery sample
-coreconfigitem(
-    b'devel',
-    b'discovery.sample-size',
-    default=200,
-)
-# Control the initial size of the discovery for initial change
-coreconfigitem(
-    b'devel',
-    b'discovery.sample-size.initial',
-    default=100,
-)
-_registerdiffopts(section=b'diff')
-coreconfigitem(
-    b'diff',
-    b'merge',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'email',
-    b'bcc',
-    default=None,
-)
-coreconfigitem(
-    b'email',
-    b'cc',
-    default=None,
-)
-coreconfigitem(
-    b'email',
-    b'charsets',
-    default=list,
-)
-coreconfigitem(
-    b'email',
-    b'from',
-    default=None,
-)
-coreconfigitem(
-    b'email',
-    b'method',
-    default=b'smtp',
-)
-coreconfigitem(
-    b'email',
-    b'reply-to',
-    default=None,
-)
-coreconfigitem(
-    b'email',
-    b'to',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'archivemetatemplate',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'experimental',
-    b'auto-publish',
-    default=b'publish',
-)
-coreconfigitem(
-    b'experimental',
-    b'bundle-phases',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundle2-advertise',
-    default=True,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundle2-output-capture',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundle2.pushback',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundle2lazylocking',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecomplevel',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecomplevel.bzip2',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecomplevel.gzip',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecomplevel.none',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecomplevel.zstd',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecompthreads',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecompthreads.bzip2',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecompthreads.gzip',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecompthreads.none',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'bundlecompthreads.zstd',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'changegroup3',
-    default=True,
-)
-coreconfigitem(
-    b'experimental',
-    b'changegroup4',
-    default=False,
-)
-
-# might remove rank configuration once the computation has no impact
-coreconfigitem(
-    b'experimental',
-    b'changelog-v2.compute-rank',
-    default=True,
-)
-coreconfigitem(
-    b'experimental',
-    b'cleanup-as-archived',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'clientcompressionengines',
-    default=list,
-)
-coreconfigitem(
-    b'experimental',
-    b'copytrace',
-    default=b'on',
-)
-coreconfigitem(
-    b'experimental',
-    b'copytrace.movecandidateslimit',
-    default=100,
-)
-coreconfigitem(
-    b'experimental',
-    b'copytrace.sourcecommitlimit',
-    default=100,
-)
-coreconfigitem(
-    b'experimental',
-    b'copies.read-from',
-    default=b"filelog-only",
-)
-coreconfigitem(
-    b'experimental',
-    b'copies.write-to',
-    default=b'filelog-only',
-)
-coreconfigitem(
-    b'experimental',
-    b'crecordtest',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'directaccess',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'directaccess.revnums',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'editortmpinhg',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution',
-    default=list,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.allowdivergence',
-    default=False,
-    alias=[(b'experimental', b'allowdivergence')],
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.allowunstable',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.createmarkers',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.effect-flags',
-    default=True,
-    alias=[(b'experimental', b'effect-flags')],
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.exchange',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.bundle-obsmarker',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.bundle-obsmarker:mandatory',
-    default=True,
-)
-coreconfigitem(
-    b'experimental',
-    b'log.topo',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.report-instabilities',
-    default=True,
-)
-coreconfigitem(
-    b'experimental',
-    b'evolution.track-operation',
-    default=True,
-)
-# repo-level config to exclude a revset from visibility
-#
-# The target use case is to use `share` to expose different subsets of the
-# same repository, especially server side. See also `server.view`.
-coreconfigitem(
-    b'experimental',
-    b'extra-filter-revs',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'maxdeltachainspan',
-    default=-1,
-)
-# tracks files which were undeleted (merge might delete them but we explicitly
-# kept/undeleted them) and creates new filenodes for them
-coreconfigitem(
-    b'experimental',
-    b'merge-track-salvaged',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'mmapindexthreshold',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'narrow',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'nonnormalparanoidcheck',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'exportableenviron',
-    default=list,
-)
-coreconfigitem(
-    b'experimental',
-    b'extendedheader.index',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'extendedheader.similarity',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'graphshorten',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'graphstyle.parent',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'experimental',
-    b'graphstyle.missing',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'experimental',
-    b'graphstyle.grandparent',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'experimental',
-    b'hook-track-tags',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'httppostargs',
-    default=False,
-)
-coreconfigitem(b'experimental', b'nointerrupt', default=False)
-coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
-
-coreconfigitem(
-    b'experimental',
-    b'obsmarkers-exchange-debug',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'remotenames',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'removeemptydirs',
-    default=True,
-)
-coreconfigitem(
-    b'experimental',
-    b'revert.interactive.select-to-keep',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'revisions.prefixhexnode',
-    default=False,
-)
-# "out of experimental" todo list.
-#
-# * include management of a persistent nodemap in the main docket
-# * enforce a "no-truncate" policy for mmap safety
-#      - for censoring operation
-#      - for stripping operation
-#      - for rollback operation
-# * proper streaming (race free) of the docket file
-# * track garbage data to eventually allow rewriting -existing- sidedata.
-# * Exchange-wise, we will also need to do something more efficient than
-#   keeping references to the affected revlogs, especially memory-wise when
-#   rewriting sidedata.
-# * introduce a proper solution to reduce the number of filelog related files.
-# * use caching for reading sidedata (similar to what we do for data).
-# * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
-# * Improvement to consider
-#   - avoid compression header in chunk using the default compression?
-#   - forbid "inline" compression mode entirely?
-#   - split the data offset and flag field (the 2 bytes saved are mostly trouble)
-#   - keep track of uncompressed -chunk- size (to preallocate memory better)
-#   - keep track of chain base or size (probably not that useful anymore)
-coreconfigitem(
-    b'experimental',
-    b'revlogv2',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'revisions.disambiguatewithin',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
-    b'rust.index',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'server.allow-hidden-access',
-    default=list,
-)
-coreconfigitem(
-    b'experimental',
-    b'server.filesdata.recommended-batch-size',
-    default=50000,
-)
-coreconfigitem(
-    b'experimental',
-    b'server.manifestdata.recommended-batch-size',
-    default=100000,
-)
-coreconfigitem(
-    b'experimental',
-    b'server.stream-narrow-clones',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'single-head-per-branch',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'single-head-per-branch:account-closed-heads',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'single-head-per-branch:public-changes-only',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'sparse-read',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'sparse-read.density-threshold',
-    default=0.50,
-)
-coreconfigitem(
-    b'experimental',
-    b'sparse-read.min-gap-size',
-    default=b'65K',
-)
-coreconfigitem(
-    b'experimental',
-    b'stream-v3',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'treemanifest',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'update.atomic-file',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'web.full-garbage-collection-rate',
-    default=1,  # still forcing a full collection on each request
-)
-coreconfigitem(
-    b'experimental',
-    b'worker.wdir-get-thread-safe',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'worker.repository-upgrade',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'xdiff',
-    default=False,
-)
-coreconfigitem(
-    b'extensions',
-    b'[^:]*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'extensions',
-    b'[^:]*:required',
-    default=False,
-    generic=True,
-)
-coreconfigitem(
-    b'extdata',
-    b'.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'format',
-    b'bookmarks-in-store',
-    default=False,
-)
-coreconfigitem(
-    b'format',
-    b'chunkcachesize',
-    default=None,
-    experimental=True,
-)
-coreconfigitem(
-    # Enable this dirstate format *when creating a new repository*.
-    # Which format to use for existing repos is controlled by .hg/requires
-    b'format',
-    b'use-dirstate-v2',
-    default=False,
-    experimental=True,
-    alias=[(b'format', b'exp-rc-dirstate-v2')],
-)
-coreconfigitem(
-    b'format',
-    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-dirstate-tracked-hint',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-dirstate-tracked-hint.version',
-    default=1,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'dotencode',
-    default=True,
-)
-coreconfigitem(
-    b'format',
-    b'generaldelta',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'manifestcachesize',
-    default=None,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'maxchainlen',
-    default=dynamicdefault,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'obsstore-version',
-    default=None,
-)
-coreconfigitem(
-    b'format',
-    b'sparse-revlog',
-    default=True,
-)
-coreconfigitem(
-    b'format',
-    b'revlog-compression',
-    default=lambda: [b'zstd', b'zlib'],
-    alias=[(b'experimental', b'format.compression')],
-)
-# Experimental TODOs:
-#
-# * Same as for revlogv2 (but for the reduction of the number of files)
-# * Actually computing the rank of changesets
-# * Improvement to investigate
-#   - storing .hgtags fnode
-#   - storing branch related identifier
-
-coreconfigitem(
-    b'format',
-    b'exp-use-changelog-v2',
-    default=None,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'usefncache',
-    default=True,
-)
-coreconfigitem(
-    b'format',
-    b'usegeneraldelta',
-    default=True,
-)
-coreconfigitem(
-    b'format',
-    b'usestore',
-    default=True,
-)
-
-
-def _persistent_nodemap_default():
-    """compute `use-persistent-nodemap` default value
-
-    The feature is disabled unless a fast implementation is available.
-    """
-    from . import policy
-
-    return policy.importrust('revlog') is not None
-
-
-coreconfigitem(
-    b'format',
-    b'use-persistent-nodemap',
-    default=_persistent_nodemap_default,
-)
-coreconfigitem(
-    b'format',
-    b'exp-use-copies-side-data-changeset',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-share-safe',
-    default=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
-    b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
-    default=False,
-    experimental=True,
-)
-
-# Turning this on by default means we are confident about the scaling of
-# phases. This is not guaranteed to be the case at the time this message is
-# written.
-coreconfigitem(
-    b'format',
-    b'use-internal-phase',
-    default=False,
-    experimental=True,
-)
-# The interaction between the archived phase and obsolescence markers needs to
-# be sorted out before wider usage of this is to be considered.
-#
-# At the time this message is written, behavior when archiving obsolete
-# changesets differs significantly from stripping. As part of stripping, we
-# also remove the obsolescence markers associated with the stripped
-# changesets, revealing the predecessor changesets when applicable. When
-# archiving, we don't touch the obsolescence markers, keeping everything
-# hidden. This can result in quite confusing situations for people combining
-# exchange of draft changesets with the archived phase, as some markers
-# needed by others may be skipped during exchange.
-coreconfigitem(
-    b'format',
-    b'exp-archived-phase',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'shelve',
-    b'store',
-    default=b'internal',
-    experimental=True,
-)
-coreconfigitem(
-    b'fsmonitor',
-    b'warn_when_unused',
-    default=True,
-)
-coreconfigitem(
-    b'fsmonitor',
-    b'warn_update_file_count',
-    default=50000,
-)
-coreconfigitem(
-    b'fsmonitor',
-    b'warn_update_file_count_rust',
-    default=400000,
-)
-coreconfigitem(
-    b'help',
-    br'hidden-command\..*',
-    default=False,
-    generic=True,
-)
-coreconfigitem(
-    b'help',
-    br'hidden-topic\..*',
-    default=False,
-    generic=True,
-)
-coreconfigitem(
-    b'hooks',
-    b'[^:]*',
-    default=dynamicdefault,
-    generic=True,
-)
-coreconfigitem(
-    b'hooks',
-    b'.*:run-with-plain',
-    default=True,
-    generic=True,
-)
-coreconfigitem(
-    b'hgweb-paths',
-    b'.*',
-    default=list,
-    generic=True,
-)
-coreconfigitem(
-    b'hostfingerprints',
-    b'.*',
-    default=list,
-    generic=True,
-)
-coreconfigitem(
-    b'hostsecurity',
-    b'ciphers',
-    default=None,
-)
-coreconfigitem(
-    b'hostsecurity',
-    b'minimumprotocol',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'hostsecurity',
-    b'.*:minimumprotocol$',
-    default=dynamicdefault,
-    generic=True,
-)
-coreconfigitem(
-    b'hostsecurity',
-    b'.*:ciphers$',
-    default=dynamicdefault,
-    generic=True,
-)
-coreconfigitem(
-    b'hostsecurity',
-    b'.*:fingerprints$',
-    default=list,
-    generic=True,
-)
-coreconfigitem(
-    b'hostsecurity',
-    b'.*:verifycertsfile$',
-    default=None,
-    generic=True,
-)
-
-coreconfigitem(
-    b'http_proxy',
-    b'always',
-    default=False,
-)
-coreconfigitem(
-    b'http_proxy',
-    b'host',
-    default=None,
-)
-coreconfigitem(
-    b'http_proxy',
-    b'no',
-    default=list,
-)
-coreconfigitem(
-    b'http_proxy',
-    b'passwd',
-    default=None,
-)
-coreconfigitem(
-    b'http_proxy',
-    b'user',
-    default=None,
-)
-
-coreconfigitem(
-    b'http',
-    b'timeout',
-    default=None,
-)
-
-coreconfigitem(
-    b'logtoprocess',
-    b'commandexception',
-    default=None,
-)
-coreconfigitem(
-    b'logtoprocess',
-    b'commandfinish',
-    default=None,
-)
-coreconfigitem(
-    b'logtoprocess',
-    b'command',
-    default=None,
-)
-coreconfigitem(
-    b'logtoprocess',
-    b'develwarn',
-    default=None,
-)
-coreconfigitem(
-    b'logtoprocess',
-    b'uiblocked',
-    default=None,
-)
-coreconfigitem(
-    b'merge',
-    b'checkunknown',
-    default=b'abort',
-)
-coreconfigitem(
-    b'merge',
-    b'checkignored',
-    default=b'abort',
-)
-coreconfigitem(
-    b'experimental',
-    b'merge.checkpathconflicts',
-    default=False,
-)
-coreconfigitem(
-    b'merge',
-    b'followcopies',
-    default=True,
-)
-coreconfigitem(
-    b'merge',
-    b'on-failure',
-    default=b'continue',
-)
-coreconfigitem(
-    b'merge',
-    b'preferancestor',
-    default=lambda: [b'*'],
-    experimental=True,
-)
-coreconfigitem(
-    b'merge',
-    b'strict-capability-check',
-    default=False,
-)
-coreconfigitem(
-    b'merge',
-    b'disable-partial-tools',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'partial-merge-tools',
-    b'.*',
-    default=None,
-    generic=True,
-    experimental=True,
-)
-coreconfigitem(
-    b'partial-merge-tools',
-    br'.*\.patterns',
-    default=dynamicdefault,
-    generic=True,
-    priority=-1,
-    experimental=True,
-)
-coreconfigitem(
-    b'partial-merge-tools',
-    br'.*\.executable$',
-    default=dynamicdefault,
-    generic=True,
-    priority=-1,
-    experimental=True,
-)
-coreconfigitem(
-    b'partial-merge-tools',
-    br'.*\.order',
-    default=0,
-    generic=True,
-    priority=-1,
-    experimental=True,
-)
-coreconfigitem(
-    b'partial-merge-tools',
-    br'.*\.args',
-    default=b"$local $base $other",
-    generic=True,
-    priority=-1,
-    experimental=True,
-)
-coreconfigitem(
-    b'partial-merge-tools',
-    br'.*\.disable',
-    default=False,
-    generic=True,
-    priority=-1,
-    experimental=True,
-)
-coreconfigitem(
-    b'merge-tools',
-    b'.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.args$',
-    default=b"$local $base $other",
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.binary$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.check$',
-    default=list,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.checkchanged$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.executable$',
-    default=dynamicdefault,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.fixeol$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.gui$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.mergemarkers$',
-    default=b'basic',
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.mergemarkertemplate$',
-    default=dynamicdefault,  # take from command-templates.mergemarker
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.priority$',
-    default=0,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.premerge$',
-    default=dynamicdefault,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.regappend$',
-    default=b"",
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'merge-tools',
-    br'.*\.symlink$',
-    default=False,
-    generic=True,
-    priority=-1,
-)
-coreconfigitem(
-    b'pager',
-    b'attend-.*',
-    default=dynamicdefault,
-    generic=True,
-)
-coreconfigitem(
-    b'pager',
-    b'ignore',
-    default=list,
-)
-coreconfigitem(
-    b'pager',
-    b'pager',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'patch',
-    b'eol',
-    default=b'strict',
-)
-coreconfigitem(
-    b'patch',
-    b'fuzz',
-    default=2,
-)
-coreconfigitem(
-    b'paths',
-    b'default',
-    default=None,
-)
-coreconfigitem(
-    b'paths',
-    b'default-push',
-    default=None,
-)
-coreconfigitem(
-    b'paths',
-    b'[^:]*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'paths',
-    b'.*:bookmarks.mode',
-    default='default',
-    generic=True,
-)
-coreconfigitem(
-    b'paths',
-    b'.*:multi-urls',
-    default=False,
-    generic=True,
-)
-coreconfigitem(
-    b'paths',
-    b'.*:pushrev',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'paths',
-    b'.*:pushurl',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'paths',
-    b'.*:pulled-delta-reuse-policy',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'phases',
-    b'checksubrepos',
-    default=b'follow',
-)
-coreconfigitem(
-    b'phases',
-    b'new-commit',
-    default=b'draft',
-)
-coreconfigitem(
-    b'phases',
-    b'publish',
-    default=True,
-)
-coreconfigitem(
-    b'profiling',
-    b'enabled',
-    default=False,
-)
-coreconfigitem(
-    b'profiling',
-    b'format',
-    default=b'text',
-)
-coreconfigitem(
-    b'profiling',
-    b'freq',
-    default=1000,
-)
-coreconfigitem(
-    b'profiling',
-    b'limit',
-    default=30,
-)
-coreconfigitem(
-    b'profiling',
-    b'nested',
-    default=0,
-)
-coreconfigitem(
-    b'profiling',
-    b'output',
-    default=None,
-)
-coreconfigitem(
-    b'profiling',
-    b'showmax',
-    default=0.999,
-)
-coreconfigitem(
-    b'profiling',
-    b'showmin',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'profiling',
-    b'showtime',
-    default=True,
-)
-coreconfigitem(
-    b'profiling',
-    b'sort',
-    default=b'inlinetime',
-)
-coreconfigitem(
-    b'profiling',
-    b'statformat',
-    default=b'hotpath',
-)
-coreconfigitem(
-    b'profiling',
-    b'time-track',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'profiling',
-    b'type',
-    default=b'stat',
-)
-coreconfigitem(
-    b'progress',
-    b'assume-tty',
-    default=False,
-)
-coreconfigitem(
-    b'progress',
-    b'changedelay',
-    default=1,
-)
-coreconfigitem(
-    b'progress',
-    b'clear-complete',
-    default=True,
-)
-coreconfigitem(
-    b'progress',
-    b'debug',
-    default=False,
-)
-coreconfigitem(
-    b'progress',
-    b'delay',
-    default=3,
-)
-coreconfigitem(
-    b'progress',
-    b'disable',
-    default=False,
-)
-coreconfigitem(
-    b'progress',
-    b'estimateinterval',
-    default=60.0,
-)
-coreconfigitem(
-    b'progress',
-    b'format',
-    default=lambda: [b'topic', b'bar', b'number', b'estimate'],
-)
-coreconfigitem(
-    b'progress',
-    b'refresh',
-    default=0.1,
-)
-coreconfigitem(
-    b'progress',
-    b'width',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'pull',
-    b'confirm',
-    default=False,
-)
-coreconfigitem(
-    b'push',
-    b'pushvars.server',
-    default=False,
-)
-coreconfigitem(
-    b'rewrite',
-    b'backup-bundle',
-    default=True,
-    alias=[(b'ui', b'history-editing-backup')],
-)
-coreconfigitem(
-    b'rewrite',
-    b'update-timestamp',
-    default=False,
-)
-coreconfigitem(
-    b'rewrite',
-    b'empty-successor',
-    default=b'skip',
-    experimental=True,
-)
-# experimental as long as format.use-dirstate-v2 is.
-coreconfigitem(
-    b'storage',
-    b'dirstate-v2.slow-path',
-    default=b"abort",
-    experimental=True,
-)
-coreconfigitem(
-    b'storage',
-    b'new-repo-backend',
-    default=b'revlogv1',
-    experimental=True,
-)
-coreconfigitem(
-    b'storage',
-    b'revlog.optimize-delta-parent-choice',
-    default=True,
-    alias=[(b'format', b'aggressivemergedeltas')],
-)
-coreconfigitem(
-    b'storage',
-    b'revlog.delta-parent-search.candidate-group-chunk-size',
-    default=20,
-)
-coreconfigitem(
-    b'storage',
-    b'revlog.issue6528.fix-incoming',
-    default=True,
-)
-# experimental as long as rust is experimental (or a C version is implemented)
-coreconfigitem(
-    b'storage',
-    b'revlog.persistent-nodemap.mmap',
-    default=True,
-)
-# experimental as long as format.use-persistent-nodemap is.
-coreconfigitem(
-    b'storage',
-    b'revlog.persistent-nodemap.slow-path',
-    default=b"abort",
-)
-
-coreconfigitem(
-    b'storage',
-    b'revlog.reuse-external-delta',
-    default=True,
-)
-# This option is True unless `format.generaldelta` is set.
-coreconfigitem(
-    b'storage',
-    b'revlog.reuse-external-delta-parent',
-    default=None,
-)
-coreconfigitem(
-    b'storage',
-    b'revlog.zlib.level',
-    default=None,
-)
-coreconfigitem(
-    b'storage',
-    b'revlog.zstd.level',
-    default=None,
-)
-coreconfigitem(
-    b'server',
-    b'bookmarks-pushkey-compat',
-    default=True,
-)
-coreconfigitem(
-    b'server',
-    b'bundle1',
-    default=True,
-)
-coreconfigitem(
-    b'server',
-    b'bundle1gd',
-    default=None,
-)
-coreconfigitem(
-    b'server',
-    b'bundle1.pull',
-    default=None,
-)
-coreconfigitem(
-    b'server',
-    b'bundle1gd.pull',
-    default=None,
-)
-coreconfigitem(
-    b'server',
-    b'bundle1.push',
-    default=None,
-)
-coreconfigitem(
-    b'server',
-    b'bundle1gd.push',
-    default=None,
-)
-coreconfigitem(
-    b'server',
-    b'bundle2.stream',
-    default=True,
-    alias=[(b'experimental', b'bundle2.stream')],
-)
-coreconfigitem(
-    b'server',
-    b'compressionengines',
-    default=list,
-)
-coreconfigitem(
-    b'server',
-    b'concurrent-push-mode',
-    default=b'check-related',
-)
-coreconfigitem(
-    b'server',
-    b'disablefullbundle',
-    default=False,
-)
-coreconfigitem(
-    b'server',
-    b'maxhttpheaderlen',
-    default=1024,
-)
-coreconfigitem(
-    b'server',
-    b'pullbundle',
-    default=True,
-)
-coreconfigitem(
-    b'server',
-    b'preferuncompressed',
-    default=False,
-)
-coreconfigitem(
-    b'server',
-    b'streamunbundle',
-    default=False,
-)
-coreconfigitem(
-    b'server',
-    b'uncompressed',
-    default=True,
-)
-coreconfigitem(
-    b'server',
-    b'uncompressedallowsecret',
-    default=False,
-)
-coreconfigitem(
-    b'server',
-    b'view',
-    default=b'served',
-)
-coreconfigitem(
-    b'server',
-    b'validate',
-    default=False,
-)
-coreconfigitem(
-    b'server',
-    b'zliblevel',
-    default=-1,
-)
-coreconfigitem(
-    b'server',
-    b'zstdlevel',
-    default=3,
-)
-coreconfigitem(
-    b'share',
-    b'pool',
-    default=None,
-)
-coreconfigitem(
-    b'share',
-    b'poolnaming',
-    default=b'identity',
-)
-coreconfigitem(
-    b'share',
-    b'safe-mismatch.source-not-safe',
-    default=b'abort',
-)
-coreconfigitem(
-    b'share',
-    b'safe-mismatch.source-safe',
-    default=b'abort',
-)
-coreconfigitem(
-    b'share',
-    b'safe-mismatch.source-not-safe.warn',
-    default=True,
-)
-coreconfigitem(
-    b'share',
-    b'safe-mismatch.source-safe.warn',
-    default=True,
-)
-coreconfigitem(
-    b'share',
-    b'safe-mismatch.source-not-safe:verbose-upgrade',
-    default=True,
-)
-coreconfigitem(
-    b'share',
-    b'safe-mismatch.source-safe:verbose-upgrade',
-    default=True,
-)
-coreconfigitem(
-    b'shelve',
-    b'maxbackups',
-    default=10,
-)
-coreconfigitem(
-    b'smtp',
-    b'host',
-    default=None,
-)
-coreconfigitem(
-    b'smtp',
-    b'local_hostname',
-    default=None,
-)
-coreconfigitem(
-    b'smtp',
-    b'password',
-    default=None,
-)
-coreconfigitem(
-    b'smtp',
-    b'port',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'smtp',
-    b'tls',
-    default=b'none',
-)
-coreconfigitem(
-    b'smtp',
-    b'username',
-    default=None,
-)
-coreconfigitem(
-    b'sparse',
-    b'missingwarning',
-    default=True,
-    experimental=True,
-)
-coreconfigitem(
-    b'subrepos',
-    b'allowed',
-    default=dynamicdefault,  # to make backporting simpler
-)
-coreconfigitem(
-    b'subrepos',
-    b'hg:allowed',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'subrepos',
-    b'git:allowed',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'subrepos',
-    b'svn:allowed',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'templates',
-    b'.*',
-    default=None,
-    generic=True,
-)
-coreconfigitem(
-    b'templateconfig',
-    b'.*',
-    default=dynamicdefault,
-    generic=True,
-)
-coreconfigitem(
-    b'trusted',
-    b'groups',
-    default=list,
-)
-coreconfigitem(
-    b'trusted',
-    b'users',
-    default=list,
-)
-coreconfigitem(
-    b'ui',
-    b'_usedassubrepo',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'allowemptycommit',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'archivemeta',
-    default=True,
-)
-coreconfigitem(
-    b'ui',
-    b'askusername',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'available-memory',
-    default=None,
-)
-
-coreconfigitem(
-    b'ui',
-    b'clonebundlefallback',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'clonebundleprefers',
-    default=list,
-)
-coreconfigitem(
-    b'ui',
-    b'clonebundles',
-    default=True,
-)
-coreconfigitem(
-    b'ui',
-    b'color',
-    default=b'auto',
-)
-coreconfigitem(
-    b'ui',
-    b'commitsubrepos',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'debug',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'debugger',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'editor',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'ui',
-    b'detailed-exit-code',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'ui',
-    b'fallbackencoding',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'forcecwd',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'forcemerge',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'formatdebug',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'formatjson',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'formatted',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'interactive',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'interface',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'interface.chunkselector',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'large-file-limit',
-    default=10 * (2 ** 20),
-)
-coreconfigitem(
-    b'ui',
-    b'logblockedtimes',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'merge',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'mergemarkers',
-    default=b'basic',
-)
-coreconfigitem(
-    b'ui',
-    b'message-output',
-    default=b'stdio',
-)
-coreconfigitem(
-    b'ui',
-    b'nontty',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'origbackuppath',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'paginate',
-    default=True,
-)
-coreconfigitem(
-    b'ui',
-    b'patch',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'portablefilenames',
-    default=b'warn',
-)
-coreconfigitem(
-    b'ui',
-    b'promptecho',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'quiet',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'quietbookmarkmove',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'relative-paths',
-    default=b'legacy',
-)
-coreconfigitem(
-    b'ui',
-    b'remotecmd',
-    default=b'hg',
-)
-coreconfigitem(
-    b'ui',
-    b'report_untrusted',
-    default=True,
-)
-coreconfigitem(
-    b'ui',
-    b'rollback',
-    default=True,
-)
-coreconfigitem(
-    b'ui',
-    b'signal-safe-lock',
-    default=True,
-)
-coreconfigitem(
-    b'ui',
-    b'slash',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'ssh',
-    default=b'ssh',
-)
-coreconfigitem(
-    b'ui',
-    b'ssherrorhint',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'statuscopies',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'strict',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'style',
-    default=b'',
-)
-coreconfigitem(
-    b'ui',
-    b'supportcontact',
-    default=None,
-)
-coreconfigitem(
-    b'ui',
-    b'textwidth',
-    default=78,
-)
-coreconfigitem(
-    b'ui',
-    b'timeout',
-    default=b'600',
-)
-coreconfigitem(
-    b'ui',
-    b'timeout.warn',
-    default=0,
-)
-coreconfigitem(
-    b'ui',
-    b'timestamp-output',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'traceback',
-    default=False,
-)
-coreconfigitem(
-    b'ui',
-    b'tweakdefaults',
-    default=False,
-)
-coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
-coreconfigitem(
-    b'ui',
-    b'verbose',
-    default=False,
-)
-coreconfigitem(
-    b'verify',
-    b'skipflags',
-    default=0,
-)
-coreconfigitem(
-    b'web',
-    b'allowbz2',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'allowgz',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'allow-pull',
-    alias=[(b'web', b'allowpull')],
-    default=True,
-)
-coreconfigitem(
-    b'web',
-    b'allow-push',
-    alias=[(b'web', b'allow_push')],
-    default=list,
-)
-coreconfigitem(
-    b'web',
-    b'allowzip',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'archivesubrepos',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'cache',
-    default=True,
-)
-coreconfigitem(
-    b'web',
-    b'comparisoncontext',
-    default=5,
-)
-coreconfigitem(
-    b'web',
-    b'contact',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'deny_push',
-    default=list,
-)
-coreconfigitem(
-    b'web',
-    b'guessmime',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'hidden',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'labels',
-    default=list,
-)
-coreconfigitem(
-    b'web',
-    b'logoimg',
-    default=b'hglogo.png',
-)
-coreconfigitem(
-    b'web',
-    b'logourl',
-    default=b'https://mercurial-scm.org/',
-)
-coreconfigitem(
-    b'web',
-    b'accesslog',
-    default=b'-',
-)
-coreconfigitem(
-    b'web',
-    b'address',
-    default=b'',
-)
-coreconfigitem(
-    b'web',
-    b'allow-archive',
-    alias=[(b'web', b'allow_archive')],
-    default=list,
-)
-coreconfigitem(
-    b'web',
-    b'allow_read',
-    default=list,
-)
-coreconfigitem(
-    b'web',
-    b'baseurl',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'cacerts',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'certificate',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'collapse',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'csp',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'deny_read',
-    default=list,
-)
-coreconfigitem(
-    b'web',
-    b'descend',
-    default=True,
-)
-coreconfigitem(
-    b'web',
-    b'description',
-    default=b"",
-)
-coreconfigitem(
-    b'web',
-    b'encoding',
-    default=lambda: encoding.encoding,
-)
-coreconfigitem(
-    b'web',
-    b'errorlog',
-    default=b'-',
-)
-coreconfigitem(
-    b'web',
-    b'ipv6',
-    default=False,
-)
-coreconfigitem(
-    b'web',
-    b'maxchanges',
-    default=10,
-)
-coreconfigitem(
-    b'web',
-    b'maxfiles',
-    default=10,
-)
-coreconfigitem(
-    b'web',
-    b'maxshortchanges',
-    default=60,
-)
-coreconfigitem(
-    b'web',
-    b'motd',
-    default=b'',
-)
-coreconfigitem(
-    b'web',
-    b'name',
-    default=dynamicdefault,
-)
-coreconfigitem(
-    b'web',
-    b'port',
-    default=8000,
-)
-coreconfigitem(
-    b'web',
-    b'prefix',
-    default=b'',
-)
-coreconfigitem(
-    b'web',
-    b'push_ssl',
-    default=True,
-)
-coreconfigitem(
-    b'web',
-    b'refreshinterval',
-    default=20,
-)
-coreconfigitem(
-    b'web',
-    b'server-header',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'static',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'staticurl',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'stripes',
-    default=1,
-)
-coreconfigitem(
-    b'web',
-    b'style',
-    default=b'paper',
-)
-coreconfigitem(
-    b'web',
-    b'templates',
-    default=None,
-)
-coreconfigitem(
-    b'web',
-    b'view',
-    default=b'served',
-    experimental=True,
-)
-coreconfigitem(
-    b'worker',
-    b'backgroundclose',
-    default=dynamicdefault,
-)
-# Windows defaults to a limit of 512 open files. A buffer of 128
-# should give us enough headway.
-coreconfigitem(
-    b'worker',
-    b'backgroundclosemaxqueue',
-    default=384,
-)
-coreconfigitem(
-    b'worker',
-    b'backgroundcloseminfilecount',
-    default=2048,
-)
-coreconfigitem(
-    b'worker',
-    b'backgroundclosethreadcount',
-    default=4,
-)
-coreconfigitem(
-    b'worker',
-    b'enabled',
-    default=True,
-)
-coreconfigitem(
-    b'worker',
-    b'numcpus',
-    default=None,
-)
-
-# Rebase related configuration moved to core because other extension are doing
-# strange things. For example, shelve import the extensions to reuse some bit
-# without formally loading it.
-coreconfigitem(
-    b'commands',
-    b'rebase.requiredest',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'rebaseskipobsolete',
-    default=True,
-)
-coreconfigitem(
-    b'rebase',
-    b'singletransaction',
-    default=False,
-)
-coreconfigitem(
-    b'rebase',
-    b'experimental.inmemory',
-    default=False,
-)
-
-# This setting controls creation of a rebase_source extra field
-# during rebase. When False, no such field is created. This is
-# useful eg for incrementally converting changesets and then
-# rebasing them onto an existing repo.
-# WARNING: this is an advanced setting reserved for people who know
-# exactly what they are doing. Misuse of this setting can easily
-# result in obsmarker cycles and a vivid headache.
-coreconfigitem(
-    b'rebase',
-    b'store-source',
-    default=True,
-    experimental=True,
-)
+import_configitems_from_file()
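
For context, the new loader can be pictured along the following lines. This is
a minimal, hypothetical sketch only: it assumes a tomllib-style TOML parser,
reuses the coreconfigitem() and dynamicdefault names already defined in this
module, and omits templates/template-applications handling; the real
import_configitems_from_file() implementation is not shown in this patch.

    import tomllib  # assumption: a tomllib-style parser is available

    def import_configitems_from_file():
        # Register every [[items]] entry from configitems.toml through the
        # pre-existing coreconfigitem() helper.
        with open('mercurial/configitems.toml', 'rb') as fp:
            data = tomllib.load(fp)
        for item in data['items']:
            default = item.get('default')
            kind = item.get('default-type')
            if kind == 'dynamic':
                default = dynamicdefault
            elif kind == 'list_type':
                default = list
            elif kind == 'lambda':
                # wrap the literal default in a callable, as the old Python
                # declarations did with `default=lambda: [...]`
                default = lambda value=default: value
            coreconfigitem(
                item['section'].encode('ascii'),
                item['name'].encode('ascii'),
                default=default,
                alias=[
                    (s.encode('ascii'), n.encode('ascii'))
                    for s, n in item.get('alias', [])
                ],
                generic=item.get('generic', False),
                priority=item.get('priority', 0),
                experimental=item.get('experimental', False),
            )
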
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/configitems.toml	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,2867 @@
+# configitems.toml - centralized declaration of configuration options
+#
+# This file contains declarations of the core Mercurial configuration options.
+#
+# # Structure
+#
+# items: array of config items
+# templates: mapping of template name to template declaration
+# template-applications: array of template applications
+#
+# # Elements
+#
+# ## Item
+#
+# Declares a core Mercurial option.
+#
+# - section: string (required)
+# - name: string (required)
+# - default-type: string, changes how `default` is read
+# - default: any
+# - generic: boolean
+# - priority: integer, only if `generic` is true
+# - alias: list of 2-tuples of strings
+# - experimental: boolean
+# - documentation: string
+# - in_core_extension: string
+#
+# ## Template
+#
+# Declares a group of options to be re-used for multiple sections.
+#
+# - all the same fields as `Item`, except `section` and `name`
+# - `suffix` (string, required)
+#
+# ## Template applications
+#
+# Uses a `Template` to instantiate its options in a given section.
+#
+# - template: string (required, must match a `Template` name)
+# - section: string (required)
+
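+# # Example
+#
+# A purely illustrative, hypothetical declaration of one item, one template
+# and one template application, following the field descriptions above (the
+# names below do not correspond to real options):
+#
+#   [[items]]
+#   section = "my-section"
+#   name = "my-option"
+#   default = true
+#   documentation = "A hypothetical option, shown only as an example."
+#
+#   [[templates.my-template]]
+#   suffix = "enabled"
+#   default = false
+#
+#   [[template-applications]]
+#   template = "my-template"
+#   section = "my-section"
+#
+# The application above would declare a `my-section.enabled` option
+# defaulting to false.
+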
+[[items]]
+section = "alias"
+name = ".*"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "auth"
+name = "cookiefile"
+
+# bookmarks.pushing: internal hack for discovery
+[[items]]
+section = "bookmarks"
+name = "pushing"
+default-type = "list_type"
+
+# bundle.mainreporoot: internal hack for bundlerepo
+[[items]]
+section = "bundle"
+name = "mainreporoot"
+default = ""
+
+[[items]]
+section = "censor"
+name = "policy"
+default = "abort"
+experimental = true
+
+[[items]]
+section = "chgserver"
+name = "idletimeout"
+default = 3600
+
+[[items]]
+section = "chgserver"
+name = "skiphash"
+default = false
+
+[[items]]
+section = "cmdserver"
+name = "log"
+
+[[items]]
+section = "cmdserver"
+name = "max-log-files"
+default = 7
+
+[[items]]
+section = "cmdserver"
+name = "max-log-size"
+default = "1 MB"
+
+[[items]]
+section = "cmdserver"
+name = "max-repo-cache"
+default = 0
+experimental = true
+
+[[items]]
+section = "cmdserver"
+name = "message-encodings"
+default-type = "list_type"
+
+[[items]]
+section = "cmdserver"
+name = "shutdown-on-interrupt"
+default = true
+
+[[items]]
+section = "cmdserver"
+name = "track-log"
+default-type = "lambda"
+default = [ "chgserver", "cmdserver", "repocache",]
+
+[[items]]
+section = "color"
+name = ".*"
+generic = true
+
+[[items]]
+section = "color"
+name = "mode"
+default = "auto"
+
+[[items]]
+section = "color"
+name = "pagermode"
+default-type = "dynamic"
+
+[[items]]
+section = "command-templates"
+name = "graphnode"
+alias = [["ui", "graphnodetemplate"]]
+
+[[items]]
+section = "command-templates"
+name = "log"
+alias = [["ui", "logtemplate"]]
+
+[[items]]
+section = "command-templates"
+name = "mergemarker"
+default = '{node|short} {ifeq(tags, "tip", "", ifeq(tags, "", "", "{tags} "))}{if(bookmarks, "{bookmarks} ")}{ifeq(branch, "default", "", "{branch} ")}- {author|user}: {desc|firstline}'
+alias = [["ui", "mergemarkertemplate"]]
+
+[[items]]
+section = "command-templates"
+name = "oneline-summary"
+
+[[items]]
+section = "command-templates"
+name = "oneline-summary.*"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "command-templates"
+name = "pre-merge-tool-output"
+alias = [["ui", "pre-merge-tool-output-template"]]
+
+[[items]]
+section = "commands"
+name = "commit.post-status"
+default = false
+
+[[items]]
+section = "commands"
+name = "grep.all-files"
+default = false
+experimental = true
+
+[[items]]
+section = "commands"
+name = "merge.require-rev"
+default = false
+
+[[items]]
+section = "commands"
+name = "push.require-revs"
+default = false
+
+# Rebase-related configuration moved to core because other extensions are
+# doing strange things. For example, shelve imports the extension to reuse
+# some bits without formally loading it.
+[[items]]
+section = "commands"
+name = "rebase.requiredest"
+default = false
+
+[[items]]
+section = "commands"
+name = "resolve.confirm"
+default = false
+
+[[items]]
+section = "commands"
+name = "resolve.explicit-re-merge"
+default = false
+
+[[items]]
+section = "commands"
+name = "resolve.mark-check"
+default = "none"
+
+[[items]]
+section = "commands"
+name = "show.aliasprefix"
+default-type = "list_type"
+
+[[items]]
+section = "commands"
+name = "status.relative"
+default = false
+
+[[items]]
+section = "commands"
+name = "status.skipstates"
+default = []
+experimental = true
+
+[[items]]
+section = "commands"
+name = "status.terse"
+default = ""
+
+[[items]]
+section = "commands"
+name = "status.verbose"
+default = false
+
+[[items]]
+section = "commands"
+name = "update.check"
+
+[[items]]
+section = "commands"
+name = "update.requiredest"
+default = false
+
+[[items]]
+section = "committemplate"
+name = ".*"
+generic = true
+
+[[items]]
+section = "convert"
+name = "bzr.saverev"
+default = true
+
+[[items]]
+section = "convert"
+name = "cvsps.cache"
+default = true
+
+[[items]]
+section = "convert"
+name = "cvsps.fuzz"
+default = 60
+
+[[items]]
+section = "convert"
+name = "cvsps.logencoding"
+
+[[items]]
+section = "convert"
+name = "cvsps.mergefrom"
+
+[[items]]
+section = "convert"
+name = "cvsps.mergeto"
+
+[[items]]
+section = "convert"
+name = "git.committeractions"
+default-type = "lambda"
+default = [ "messagedifferent",]
+
+[[items]]
+section = "convert"
+name = "git.extrakeys"
+default-type = "list_type"
+
+[[items]]
+section = "convert"
+name = "git.findcopiesharder"
+default = false
+
+[[items]]
+section = "convert"
+name = "git.remoteprefix"
+default = "remote"
+
+[[items]]
+section = "convert"
+name = "git.renamelimit"
+default = 400
+
+[[items]]
+section = "convert"
+name = "git.saverev"
+default = true
+
+[[items]]
+section = "convert"
+name = "git.similarity"
+default = 50
+
+[[items]]
+section = "convert"
+name = "git.skipsubmodules"
+default = false
+
+[[items]]
+section = "convert"
+name = "hg.clonebranches"
+default = false
+
+[[items]]
+section = "convert"
+name = "hg.ignoreerrors"
+default = false
+
+[[items]]
+section = "convert"
+name = "hg.preserve-hash"
+default = false
+
+[[items]]
+section = "convert"
+name = "hg.revs"
+
+[[items]]
+section = "convert"
+name = "hg.saverev"
+default = false
+
+[[items]]
+section = "convert"
+name = "hg.sourcename"
+
+[[items]]
+section = "convert"
+name = "hg.startrev"
+
+[[items]]
+section = "convert"
+name = "hg.tagsbranch"
+default = "default"
+
+[[items]]
+section = "convert"
+name = "hg.usebranchnames"
+default = true
+
+[[items]]
+section = "convert"
+name = "ignoreancestorcheck"
+default = false
+experimental = true
+
+[[items]]
+section = "convert"
+name = "localtimezone"
+default = false
+
+[[items]]
+section = "convert"
+name = "p4.encoding"
+default-type = "dynamic"
+
+[[items]]
+section = "convert"
+name = "p4.startrev"
+default = 0
+
+[[items]]
+section = "convert"
+name = "skiptags"
+default = false
+
+[[items]]
+section = "convert"
+name = "svn.branches"
+
+[[items]]
+section = "convert"
+name = "svn.dangerous-set-commit-dates"
+default = false
+
+[[items]]
+section = "convert"
+name = "svn.debugsvnlog"
+default = true
+
+[[items]]
+section = "convert"
+name = "svn.startrev"
+default = 0
+
+[[items]]
+section = "convert"
+name = "svn.tags"
+
+[[items]]
+section = "convert"
+name = "svn.trunk"
+
+[[items]]
+section = "debug"
+name = "bundling-stats"
+default = false
+documentation = "Display extra information about the bundling process."
+
+[[items]]
+section = "debug"
+name = "dirstate.delaywrite"
+default = 0
+
+[[items]]
+section = "debug"
+name = "revlog.debug-delta"
+default = false
+
+[[items]]
+section = "debug"
+name = "revlog.verifyposition.changelog"
+default = ""
+
+[[items]]
+section = "debug"
+name = "unbundling-stats"
+default = false
+documentation = "Display extra information about the unbundling process."
+
+[[items]]
+section = "defaults"
+name = ".*"
+generic = true
+
+[[items]]
+section = "devel"
+name = "all-warnings"
+default = false
+
+[[items]]
+section = "devel"
+name = "bundle.delta"
+default = ""
+
+[[items]]
+section = "devel"
+name = "bundle2.debug"
+default = false
+
+[[items]]
+section = "devel"
+name = "cache-vfs"
+
+[[items]]
+section = "devel"
+name = "check-locks"
+default = false
+
+[[items]]
+section = "devel"
+name = "check-relroot"
+default = false
+
+[[items]]
+section = "devel"
+name = "copy-tracing.multi-thread"
+default = true
+
+# Track copy information for all files, not just "added" ones (very slow)
+[[items]]
+section = "devel"
+name = "copy-tracing.trace-all-files"
+default = false
+
+[[items]]
+section = "devel"
+name = "debug.abort-update"
+default = false
+documentation = """If true, then any merge with the working copy, \
+e.g. [hg update], will be aborted after figuring out what needs to be done, \
+but before spawning the parallel worker."""
+
+[[items]]
+section = "devel"
+name = "debug.copies"
+default = false
+
+[[items]]
+section = "devel"
+name = "debug.extensions"
+default = false
+
+[[items]]
+section = "devel"
+name = "debug.peer-request"
+default = false
+
+[[items]]
+section = "devel"
+name = "debug.repo-filters"
+default = false
+
+[[items]]
+section = "devel"
+name = "default-date"
+
+[[items]]
+section = "devel"
+name = "deprec-warn"
+default = false
+
+# possible values:
+# - auto (the default)
+# - force-append
+# - force-new
+[[items]]
+section = "devel"
+name = "dirstate.v2.data_update_mode"
+default = "auto"
+
+[[items]]
+section = "devel"
+name = "disableloaddefaultcerts"
+default = false
+
+[[items]]
+section = "devel"
+name = "discovery.exchange-heads"
+default = true
+documentation = """If false, the discovery will not start with remote \
+head fetching and local head querying."""
+
+[[items]]
+section = "devel"
+name = "discovery.grow-sample"
+default = true
+documentation = """If false, the sample size used in set discovery \
+will not be increased through the process."""
+
+[[items]]
+section = "devel"
+name = "discovery.grow-sample.dynamic"
+default = true
+documentation = """If true, the default, the sample size is adapted to the shape \
+of the undecided set. It is set to the max of:
+`<target-size>, len(roots(undecided)), len(heads(undecided))`"""
+
+[[items]]
+section = "devel"
+name = "discovery.grow-sample.rate"
+default = 1.05
+documentation = "Controls the rate at which the sample grows."
+
+[[items]]
+section = "devel"
+name = "discovery.randomize"
+default = true
+documentation = """If false, random samplings during discovery are deterministic. \
+It is meant for integration tests."""
+
+[[items]]
+section = "devel"
+name = "discovery.sample-size"
+default = 200
+documentation = "Controls the initial size of the discovery sample."
+
+[[items]]
+section = "devel"
+name = "discovery.sample-size.initial"
+default = 100
+documentation = "Controls the initial size of the discovery for initial change."
+
+[[items]]
+section = "devel"
+name = "legacy.exchange"
+default-type = "list_type"
+
+[[items]]
+section = "devel"
+name = "persistent-nodemap"
+default = false
+documentation = """When true, revlogs use a special reference version of the \
+nodemap, that is not performant but is "known" to behave properly."""
+
+[[items]]
+section = "devel"
+name = "server-insecure-exact-protocol"
+default = ""
+
+[[items]]
+section = "devel"
+name = "servercafile"
+default = ""
+
+[[items]]
+section = "devel"
+name = "serverexactprotocol"
+default = ""
+
+[[items]]
+section = "devel"
+name = "serverrequirecert"
+default = false
+
+[[items]]
+section = "devel"
+name = "strip-obsmarkers"
+default = true
+
+[[items]]
+section = "devel"
+name = "sync.status.pre-dirstate-write-file"
+documentation = """
+Makes the status algorithm wait for the existence of this file \
+(or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout` \
+seconds) before taking the lock and writing the dirstate. \
+Status signals that it's ready to wait by creating a file \
+with the same name + `.waiting`. \
+Useful when testing race conditions."""
+
+[[items]]
+section = "devel"
+name = "sync.status.pre-dirstate-write-file-timeout"
+default = 2
+
+[[items]]
+section = "devel"
+name = "sync.dirstate.post-docket-read-file"
+
+[[items]]
+section = "devel"
+name = "sync.dirstate.post-docket-read-file-timeout"
+default = 2
+
+[[items]]
+section = "devel"
+name = "sync.dirstate.pre-read-file"
+
+[[items]]
+section = "devel"
+name = "sync.dirstate.pre-read-file-timeout"
+default = 2
+
+[[items]]
+section = "devel"
+name = "user.obsmarker"
+
+[[items]]
+section = "devel"
+name = "warn-config"
+
+[[items]]
+section = "devel"
+name = "warn-config-default"
+
+[[items]]
+section = "devel"
+name = "warn-config-unknown"
+
+[[items]]
+section = "devel"
+name = "warn-empty-changegroup"
+default = false
+
+[[items]]
+section = "diff"
+name = "merge"
+default = false
+experimental = true
+
+[[items]]
+section = "email"
+name = "bcc"
+
+[[items]]
+section = "email"
+name = "cc"
+
+[[items]]
+section = "email"
+name = "charsets"
+default-type = "list_type"
+
+[[items]]
+section = "email"
+name = "from"
+
+[[items]]
+section = "email"
+name = "method"
+default = "smtp"
+
+[[items]]
+section = "email"
+name = "reply-to"
+
+[[items]]
+section = "email"
+name = "to"
+
+[[items]]
+section = "experimental"
+name = "archivemetatemplate"
+default-type = "dynamic"
+
+[[items]]
+section = "experimental"
+name = "auto-publish"
+default = "publish"
+
+[[items]]
+section = "experimental"
+name = "bundle-phases"
+default = false
+
+[[items]]
+section = "experimental"
+name = "bundle2-advertise"
+default = true
+
+[[items]]
+section = "experimental"
+name = "bundle2-output-capture"
+default = false
+
+[[items]]
+section = "experimental"
+name = "bundle2.pushback"
+default = false
+
+[[items]]
+section = "experimental"
+name = "bundle2lazylocking"
+default = false
+
+[[items]]
+section = "experimental"
+name = "bundlecomplevel"
+
+[[items]]
+section = "experimental"
+name = "bundlecomplevel.bzip2"
+
+[[items]]
+section = "experimental"
+name = "bundlecomplevel.gzip"
+
+[[items]]
+section = "experimental"
+name = "bundlecomplevel.none"
+
+[[items]]
+section = "experimental"
+name = "bundlecomplevel.zstd"
+
+[[items]]
+section = "experimental"
+name = "bundlecompthreads"
+
+[[items]]
+section = "experimental"
+name = "bundlecompthreads.bzip2"
+
+[[items]]
+section = "experimental"
+name = "bundlecompthreads.gzip"
+
+[[items]]
+section = "experimental"
+name = "bundlecompthreads.none"
+
+[[items]]
+section = "experimental"
+name = "bundlecompthreads.zstd"
+
+[[items]]
+section = "experimental"
+name = "changegroup3"
+default = true
+
+[[items]]
+section = "experimental"
+name = "changegroup4"
+default = false
+
+# might remove rank configuration once the computation has no impact
+[[items]]
+section = "experimental"
+name = "changelog-v2.compute-rank"
+default = true
+
+[[items]]
+section = "experimental"
+name = "cleanup-as-archived"
+default = false
+
+[[items]]
+section = "experimental"
+name = "clientcompressionengines"
+default-type = "list_type"
+
+[[items]]
+section = "experimental"
+name = "copies.read-from"
+default = "filelog-only"
+
+[[items]]
+section = "experimental"
+name = "copies.write-to"
+default = "filelog-only"
+
+[[items]]
+section = "experimental"
+name = "copytrace"
+default = "on"
+
+[[items]]
+section = "experimental"
+name = "copytrace.movecandidateslimit"
+default = 100
+
+[[items]]
+section = "experimental"
+name = "copytrace.sourcecommitlimit"
+default = 100
+
+[[items]]
+section = "experimental"
+name = "crecordtest"
+
+[[items]]
+section = "experimental"
+name = "directaccess"
+default = false
+
+[[items]]
+section = "experimental"
+name = "directaccess.revnums"
+default = false
+
+[[items]]
+section = "experimental"
+name = "editortmpinhg"
+default = false
+
+[[items]]
+section = "experimental"
+name = "evolution"
+default-type = "list_type"
+
+[[items]]
+section = "experimental"
+name = "evolution.allowdivergence"
+default = false
+alias = [["experimental", "allowdivergence"]]
+
+[[items]]
+section = "experimental"
+name = "evolution.allowunstable"
+
+[[items]]
+section = "experimental"
+name = "evolution.bundle-obsmarker"
+default = false
+
+[[items]]
+section = "experimental"
+name = "evolution.bundle-obsmarker:mandatory"
+default = true
+
+[[items]]
+section = "experimental"
+name = "evolution.createmarkers"
+
+[[items]]
+section = "experimental"
+name = "evolution.effect-flags"
+default = true
+alias = [["experimental", "effect-flags"]]
+
+[[items]]
+section = "experimental"
+name = "evolution.exchange"
+
+[[items]]
+section = "experimental"
+name = "evolution.report-instabilities"
+default = true
+
+[[items]]
+section = "experimental"
+name = "evolution.track-operation"
+default = true
+
+[[items]]
+section = "experimental"
+name = "exportableenviron"
+default-type = "list_type"
+
+[[items]]
+section = "experimental"
+name = "extendedheader.index"
+
+[[items]]
+section = "experimental"
+name = "extendedheader.similarity"
+default = false
+
+[[items]]
+section = "experimental"
+name = "extra-filter-revs"
+documentation = """Repo-level config to prevent a revset from being visible.
+The target use case is to use `share` to expose different subsets of the same \
+repository, especially server side. See also `server.view`."""
+
+[[items]]
+section = "experimental"
+name = "graphshorten"
+default = false
+
+[[items]]
+section = "experimental"
+name = "graphstyle.grandparent"
+default-type = "dynamic"
+
+[[items]]
+section = "experimental"
+name = "graphstyle.missing"
+default-type = "dynamic"
+
+[[items]]
+section = "experimental"
+name = "graphstyle.parent"
+default-type = "dynamic"
+
+[[items]]
+section = "experimental"
+name = "hook-track-tags"
+default = false
+
+[[items]]
+section = "experimental"
+name = "httppostargs"
+default = false
+
+[[items]]
+section = "experimental"
+name = "log.topo"
+default = false
+
+[[items]]
+section = "experimental"
+name = "maxdeltachainspan"
+default = -1
+
+[[items]]
+section = "experimental"
+name = "merge-track-salvaged"
+default = false
+documentation = """Tracks files which were undeleted (merge might delete them \
+but we explicitly kept/undeleted them) and creates new filenodes for them."""
+
+[[items]]
+section = "experimental"
+name = "merge.checkpathconflicts"
+default = false
+
+[[items]]
+section = "experimental"
+name = "mmapindexthreshold"
+
+[[items]]
+section = "experimental"
+name = "narrow"
+default = false
+
+[[items]]
+section = "experimental"
+name = "nointerrupt"
+default = false
+
+[[items]]
+section = "experimental"
+name = "nointerrupt-interactiveonly"
+default = true
+
+[[items]]
+section = "experimental"
+name = "nonnormalparanoidcheck"
+default = false
+
+[[items]]
+section = "experimental"
+name = "obsmarkers-exchange-debug"
+default = false
+
+[[items]]
+section = "experimental"
+name = "rebaseskipobsolete"
+default = true
+
+[[items]]
+section = "experimental"
+name = "remotenames"
+default = false
+
+[[items]]
+section = "experimental"
+name = "removeemptydirs"
+default = true
+
+[[items]]
+section = "experimental"
+name = "revert.interactive.select-to-keep"
+default = false
+
+[[items]]
+section = "experimental"
+name = "revisions.disambiguatewithin"
+
+[[items]]
+section = "experimental"
+name = "revisions.prefixhexnode"
+default = false
+
+# "out of experimental" todo list.
+#
+# * include management of a persistent nodemap in the main docket
+# * enforce a "no-truncate" policy for mmap safety
+#      - for censoring operation
+#      - for stripping operation
+#      - for rollback operation
+# * proper streaming (race free) of the docket file
+# * track garbage data to eventually allow rewriting -existing- sidedata.
+# * Exchange-wise, we will also need to do something more efficient than
+#   keeping references to the affected revlogs, especially memory-wise when
+#   rewriting sidedata.
+# * introduce a proper solution to reduce the number of filelog related files.
+# * use caching for reading sidedata (similar to what we do for data).
+# * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
+# * Improvements to consider
+#   - avoid compression header in chunks using the default compression?
+#   - forbid "inline" compression mode entirely?
+#   - split the data offset and flag field (the 2 bytes saved are mostly trouble)
+#   - keep track of uncompressed -chunk- size (to preallocate memory better)
+#   - keep track of chain base or size (probably not that useful anymore)
+[[items]]
+section = "experimental"
+name = "revlogv2"
+
+[[items]]
+section = "experimental"
+name = "rust.index"
+default = false
+
+[[items]]
+section = "experimental"
+name = "server.allow-hidden-access"
+default-type = "list_type"
+
+[[items]]
+section = "experimental"
+name = "server.filesdata.recommended-batch-size"
+default = 50000
+
+[[items]]
+section = "experimental"
+name = "server.manifestdata.recommended-batch-size"
+default = 100000
+
+[[items]]
+section = "experimental"
+name = "server.stream-narrow-clones"
+default = false
+
+[[items]]
+section = "experimental"
+name = "single-head-per-branch"
+default = false
+
+[[items]]
+section = "experimental"
+name = "single-head-per-branch:account-closed-heads"
+default = false
+
+[[items]]
+section = "experimental"
+name = "single-head-per-branch:public-changes-only"
+default = false
+
+[[items]]
+section = "experimental"
+name = "sparse-read"
+default = false
+
+[[items]]
+section = "experimental"
+name = "sparse-read.density-threshold"
+default = 0.5
+
+[[items]]
+section = "experimental"
+name = "sparse-read.min-gap-size"
+default = "65K"
+
+[[items]]
+section = "experimental"
+name = "revlog.uncompressed-cache.enabled"
+default = true
+experimental = true
+documentation = """Enable some caching of uncompressed chunk, greatly boosting
+performance at the cost of memory usage."""
+
+[[items]]
+section = "experimental"
+name = "revlog.uncompressed-cache.factor"
+default = 4
+experimental = true
+documentation = """The size of the cache compared to the largest revision seen."""
+
+[[items]]
+section = "experimental"
+name = "revlog.uncompressed-cache.count"
+default = 10000
+experimental = true
+documentation = """The number of chunk cached."""
+
+[[items]]
+section = "experimental"
+name = "stream-v3"
+default = false
+
+[[items]]
+section = "experimental"
+name = "treemanifest"
+default = false
+
+[[items]]
+section = "experimental"
+name = "update.atomic-file"
+default = false
+
+[[items]]
+section = "experimental"
+name = "web.full-garbage-collection-rate"
+default = 1  # still forcing a full collection on each request
+
+[[items]]
+section = "experimental"
+name = "worker.repository-upgrade"
+default = false
+
+[[items]]
+section = "experimental"
+name = "worker.wdir-get-thread-safe"
+default = false
+
+[[items]]
+section = "experimental"
+name = "xdiff"
+default = false
+
+[[items]]
+section = "extdata"
+name = ".*"
+generic = true
+
+[[items]]
+section = "extensions"
+name = "[^:]*"
+generic = true
+
+[[items]]
+section = "extensions"
+name = "[^:]*:required"
+default = false
+generic = true
+
+[[items]]
+section = "format"
+name = "bookmarks-in-store"
+default = false
+
+[[items]]
+section = "format"
+name = "chunkcachesize"
+experimental = true
+
+[[items]]
+section = "format"
+name = "dotencode"
+default = true
+
+# The interaction between the archived phase and obsolescence markers needs to
+# be sorted out before wider usage of this can be considered.
+#
+# At the time this message is written, behavior when archiving obsolete
+# changesets differs significantly from stripping. As part of stripping, we
+# also remove the obsolescence markers associated with the stripped
+# changesets, revealing the predecessor changesets when applicable. When
+# archiving, we don't touch the obsolescence markers, keeping everything
+# hidden. This can result in quite confusing situations for people combining
+# the exchange of drafts with the archived phase, as some markers needed by
+# others may be skipped during exchange.
+[[items]]
+section = "format"
+name = "exp-archived-phase"
+default = false
+experimental = true
+
+# Experimental TODOs:
+#
+# * Same as for revlogv2 (but for the reduction of the number of files)
+# * Actually computing the rank of changesets
+# * Improvements to investigate
+#   - storing .hgtags fnode
+#   - storing branch-related identifiers
+[[items]]
+section = "format"
+name = "exp-use-changelog-v2"
+experimental = true
+
+[[items]]
+section = "format"
+name = "exp-use-copies-side-data-changeset"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "generaldelta"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "manifestcachesize"
+experimental = true
+
+[[items]]
+section = "format"
+name = "maxchainlen"
+default-type = "dynamic"
+experimental = true
+
+[[items]]
+section = "format"
+name = "obsstore-version"
+
+[[items]]
+section = "format"
+name = "revlog-compression"
+default-type = "lambda"
+alias = [["experimental", "format.compression"]]
+default = [ "zstd", "zlib",]
+
+[[items]]
+section = "format"
+name = "sparse-revlog"
+default = true
+
+[[items]]
+section = "format"
+name = "use-dirstate-tracked-hint"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "use-dirstate-tracked-hint.version"
+default = 1
+experimental = true
+
+[[items]]
+section = "format"
+name = "use-dirstate-v2"
+default = false
+alias = [["format", "exp-rc-dirstate-v2"]]
+experimental = true
+documentation = """Enables dirstate-v2 format *when creating a new repository*.
+Which format to use for existing repos is controlled by `.hg/requires`."""
+
+[[items]]
+section = "format"
+name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet"
+default = false
+experimental = true
+
+# Having this on by default means we are confident about the scaling of phases.
+# This is not guaranteed to be the case at the time this message is written.
+[[items]]
+section = "format"
+name = "use-internal-phase"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "use-persistent-nodemap"
+default-type = "dynamic"
+
+[[items]]
+section = "format"
+name = "use-share-safe"
+default = true
+
+[[items]]
+section = "format"
+name = "use-share-safe.automatic-upgrade-of-mismatching-repositories"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet"
+default = false
+experimental = true
+
+[[items]]
+section = "format"
+name = "usefncache"
+default = true
+
+[[items]]
+section = "format"
+name = "usegeneraldelta"
+default = true
+
+[[items]]
+section = "format"
+name = "usestore"
+default = true
+
+[[items]]
+section = "fsmonitor"
+name = "warn_update_file_count"
+default = 50000
+
+[[items]]
+section = "fsmonitor"
+name = "warn_update_file_count_rust"
+default = 400000
+
+[[items]]
+section = "fsmonitor"
+name = "warn_when_unused"
+default = true
+
+[[items]]
+section = "help"
+name = 'hidden-command\..*'
+default = false
+generic = true
+
+[[items]]
+section = "help"
+name = 'hidden-topic\..*'
+default = false
+generic = true
+
+[[items]]
+section = "hgweb-paths"
+name = ".*"
+default-type = "list_type"
+generic = true
+
+[[items]]
+section = "hooks"
+name = ".*:run-with-plain"
+default = true
+generic = true
+
+[[items]]
+section = "hooks"
+name = "[^:]*"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "hostfingerprints"
+name = ".*"
+default-type = "list_type"
+generic = true
+
+[[items]]
+section = "hostsecurity"
+name = ".*:ciphers$"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "hostsecurity"
+name = ".*:fingerprints$"
+default-type = "list_type"
+generic = true
+
+[[items]]
+section = "hostsecurity"
+name = ".*:minimumprotocol$"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "hostsecurity"
+name = ".*:verifycertsfile$"
+generic = true
+
+[[items]]
+section = "hostsecurity"
+name = "ciphers"
+
+[[items]]
+section = "hostsecurity"
+name = "minimumprotocol"
+default-type = "dynamic"
+
+[[items]]
+section = "http"
+name = "timeout"
+
+[[items]]
+section = "http_proxy"
+name = "always"
+default = false
+
+[[items]]
+section = "http_proxy"
+name = "host"
+
+[[items]]
+section = "http_proxy"
+name = "no"
+default-type = "list_type"
+
+[[items]]
+section = "http_proxy"
+name = "passwd"
+
+[[items]]
+section = "http_proxy"
+name = "user"
+
+[[items]]
+section = "logtoprocess"
+name = "command"
+
+[[items]]
+section = "logtoprocess"
+name = "commandexception"
+
+[[items]]
+section = "logtoprocess"
+name = "commandfinish"
+
+[[items]]
+section = "logtoprocess"
+name = "develwarn"
+
+[[items]]
+section = "logtoprocess"
+name = "uiblocked"
+
+[[items]]
+section = "merge"
+name = "checkignored"
+default = "abort"
+
+[[items]]
+section = "merge"
+name = "checkunknown"
+default = "abort"
+
+[[items]]
+section = "merge"
+name = "disable-partial-tools"
+default = false
+experimental = true
+
+[[items]]
+section = "merge"
+name = "followcopies"
+default = true
+
+[[items]]
+section = "merge"
+name = "on-failure"
+default = "continue"
+
+[[items]]
+section = "merge"
+name = "preferancestor"
+default-type = "lambda"
+default = ["*"]
+experimental = true
+
+[[items]]
+section = "merge"
+name = "strict-capability-check"
+default = false
+
+[[items]]
+section = "merge-tools"
+name = ".*"
+generic = true
+
+[[items]]
+section = "merge-tools"
+name = '.*\.args$'
+default = "$local $base $other"
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.binary$'
+default = false
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.check$'
+default-type = "list_type"
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.checkchanged$'
+default = false
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.executable$'
+default-type = "dynamic"
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.fixeol$'
+default = false
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.gui$'
+default = false
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.mergemarkers$'
+default = "basic"
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.mergemarkertemplate$'  # take from command-templates.mergemarker
+default-type = "dynamic"
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.premerge$'
+default-type = "dynamic"
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.priority$'
+default = 0
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.regappend$'
+default = ""
+generic = true
+priority = -1
+
+[[items]]
+section = "merge-tools"
+name = '.*\.symlink$'
+default = false
+generic = true
+priority = -1
+
+[[items]]
+section = "pager"
+name = "attend-.*"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "pager"
+name = "ignore"
+default-type = "list_type"
+
+[[items]]
+section = "pager"
+name = "pager"
+default-type = "dynamic"
+
+[[items]]
+section = "partial-merge-tools"
+name = ".*"
+generic = true
+experimental = true
+
+[[items]]
+section = "partial-merge-tools"
+name = '.*\.args'
+default = "$local $base $other"
+generic = true
+priority = -1
+experimental = true
+
+[[items]]
+section = "partial-merge-tools"
+name = '.*\.disable'
+default = false
+generic = true
+priority = -1
+experimental = true
+
+[[items]]
+section = "partial-merge-tools"
+name = '.*\.executable$'
+default-type = "dynamic"
+generic = true
+priority = -1
+experimental = true
+
+[[items]]
+section = "partial-merge-tools"
+name = '.*\.order'
+default = 0
+generic = true
+priority = -1
+experimental = true
+
+[[items]]
+section = "partial-merge-tools"
+name = '.*\.patterns'
+default-type = "dynamic"
+generic = true
+priority = -1
+experimental = true
+
+[[items]]
+section = "patch"
+name = "eol"
+default = "strict"
+
+[[items]]
+section = "patch"
+name = "fuzz"
+default = 2
+
+[[items]]
+section = "paths"
+name = "[^:]*"
+generic = true
+
+[[items]]
+section = "paths"
+name = ".*:bookmarks.mode"
+default = "default"
+generic = true
+
+[[items]]
+section = "paths"
+name = ".*:multi-urls"
+default = false
+generic = true
+
+[[items]]
+section = "paths"
+name = ".*:pulled-delta-reuse-policy"
+generic = true
+
+[[items]]
+section = "paths"
+name = ".*:pushrev"
+generic = true
+
+[[items]]
+section = "paths"
+name = ".*:pushurl"
+generic = true
+
+[[items]]
+section = "paths"
+name = "default"
+
+[[items]]
+section = "paths"
+name = "default-push"
+
+[[items]]
+section = "phases"
+name = "checksubrepos"
+default = "follow"
+
+[[items]]
+section = "phases"
+name = "new-commit"
+default = "draft"
+
+[[items]]
+section = "phases"
+name = "publish"
+default = true
+
+[[items]]
+section = "profiling"
+name = "enabled"
+default = false
+
+[[items]]
+section = "profiling"
+name = "format"
+default = "text"
+
+[[items]]
+section = "profiling"
+name = "freq"
+default = 1000
+
+[[items]]
+section = "profiling"
+name = "limit"
+default = 30
+
+[[items]]
+section = "profiling"
+name = "nested"
+default = 0
+
+[[items]]
+section = "profiling"
+name = "output"
+
+[[items]]
+section = "profiling"
+name = "showmax"
+default = 0.999
+
+[[items]]
+section = "profiling"
+name = "showmin"
+default-type = "dynamic"
+
+[[items]]
+section = "profiling"
+name = "showtime"
+default = true
+
+[[items]]
+section = "profiling"
+name = "sort"
+default = "inlinetime"
+
+[[items]]
+section = "profiling"
+name = "statformat"
+default = "hotpath"
+
+[[items]]
+section = "profiling"
+name = "time-track"
+default-type = "dynamic"
+
+[[items]]
+section = "profiling"
+name = "type"
+default = "stat"
+
+[[items]]
+section = "progress"
+name = "assume-tty"
+default = false
+
+[[items]]
+section = "progress"
+name = "changedelay"
+default = 1
+
+[[items]]
+section = "progress"
+name = "clear-complete"
+default = true
+
+[[items]]
+section = "progress"
+name = "debug"
+default = false
+
+[[items]]
+section = "progress"
+name = "delay"
+default = 3
+
+[[items]]
+section = "progress"
+name = "disable"
+default = false
+
+[[items]]
+section = "progress"
+name = "estimateinterval"
+default = 60.0
+
+[[items]]
+section = "progress"
+name = "format"
+default-type = "lambda"
+default = [ "topic", "bar", "number", "estimate",]
+
+[[items]]
+section = "progress"
+name = "refresh"
+default = 0.1
+
+[[items]]
+section = "progress"
+name = "width"
+default-type = "dynamic"
+
+[[items]]
+section = "pull"
+name = "confirm"
+default = false
+
+[[items]]
+section = "push"
+name = "pushvars.server"
+default = false
+
+[[items]]
+section = "rebase"
+name = "experimental.inmemory"
+default = false
+
+[[items]]
+section = "rebase"
+name = "singletransaction"
+default = false
+
+[[items]]
+section = "rebase"
+name = "store-source"
+default = true
+experimental = true
+documentation = """Controls creation of a `rebase_source` extra field during rebase.
+When false, no such field is created. This is useful e.g. for incrementally \
+converting changesets and then rebasing them onto an existing repo.
+WARNING: this is an advanced setting reserved for people who know \
+exactly what they are doing. Misuse of this setting can easily \
+result in obsmarker cycles and a vivid headache."""
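+# Illustrative hgrc sketch (values hypothetical) disabling the
+# `rebase_source` extra field described above:
+#
+#   [rebase]
+#   store-source = false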
+
+[[items]]
+section = "rewrite"
+name = "backup-bundle"
+default = true
+alias = [["ui", "history-editing-backup"]]
+
+[[items]]
+section = "rewrite"
+name = "empty-successor"
+default = "skip"
+experimental = true
+
+[[items]]
+section = "rewrite"
+name = "update-timestamp"
+default = false
+
+[[items]]
+section = "rhg"
+name = "cat"
+default = true
+experimental = true
+documentation = """rhg cat has some quirks that need to be ironed out. \
+In particular, the `-r` argument accepts a partial hash, but does not \
+correctly resolve `abcdef` as a potential bookmark, tag or branch name."""
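+# Hypothetical illustration of the quirk above: given a bookmark literally
+# named "abcdef", `rhg cat -r abcdef FILE` resolves "abcdef" as a node-id
+# prefix, while `hg cat -r abcdef FILE` resolves the bookmark.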
+
+[[items]]
+section = "rhg"
+name = "fallback-exectutable"
+experimental = true
+
+[[items]]
+section = "rhg"
+name = "fallback-immediately"
+default = false
+experimental = true
+
+[[items]]
+section = "rhg"
+name = "ignored-extensions"
+default-type = "list_type"
+experimental = true
+
+[[items]]
+section = "rhg"
+name = "on-unsupported"
+default = "abort"
+experimental = true
+
+[[items]]
+section = "server"
+name = "bookmarks-pushkey-compat"
+default = true
+
+[[items]]
+section = "server"
+name = "bundle1"
+default = true
+
+[[items]]
+section = "server"
+name = "bundle1.pull"
+
+[[items]]
+section = "server"
+name = "bundle1.push"
+
+[[items]]
+section = "server"
+name = "bundle1gd"
+
+[[items]]
+section = "server"
+name = "bundle1gd.pull"
+
+[[items]]
+section = "server"
+name = "bundle1gd.push"
+
+[[items]]
+section = "server"
+name = "bundle2.stream"
+default = true
+alias = [["experimental", "bundle2.stream"]]
+
+[[items]]
+section = "server"
+name = "compressionengines"
+default-type = "list_type"
+
+[[items]]
+section = "server"
+name = "concurrent-push-mode"
+default = "check-related"
+
+[[items]]
+section = "server"
+name = "disablefullbundle"
+default = false
+
+[[items]]
+section = "server"
+name = "maxhttpheaderlen"
+default = 1024
+
+[[items]]
+section = "server"
+name = "preferuncompressed"
+default = false
+
+[[items]]
+section = "server"
+name = "pullbundle"
+default = true
+
+[[items]]
+section = "server"
+name = "streamunbundle"
+default = false
+
+[[items]]
+section = "server"
+name = "uncompressed"
+default = true
+
+[[items]]
+section = "server"
+name = "uncompressedallowsecret"
+default = false
+
+[[items]]
+section = "server"
+name = "validate"
+default = false
+
+[[items]]
+section = "server"
+name = "view"
+default = "served"
+
+[[items]]
+section = "server"
+name = "zliblevel"
+default = -1
+
+[[items]]
+section = "server"
+name = "zstdlevel"
+default = 3
+
+[[items]]
+section = "share"
+name = "pool"
+
+[[items]]
+section = "share"
+name = "poolnaming"
+default = "identity"
+
+[[items]]
+section = "share"
+name = "safe-mismatch.source-not-safe"
+default = "abort"
+
+[[items]]
+section = "share"
+name = "safe-mismatch.source-not-safe.warn"
+default = true
+
+[[items]]
+section = "share"
+name = "safe-mismatch.source-not-safe:verbose-upgrade"
+default = true
+
+[[items]]
+section = "share"
+name = "safe-mismatch.source-safe"
+default = "abort"
+
+[[items]]
+section = "share"
+name = "safe-mismatch.source-safe.warn"
+default = true
+
+[[items]]
+section = "share"
+name = "safe-mismatch.source-safe:verbose-upgrade"
+default = true
+
+[[items]]
+section = "shelve"
+name = "maxbackups"
+default = 10
+
+[[items]]
+section = "shelve"
+name = "store"
+default = "internal"
+experimental = true
+
+[[items]]
+section = "smtp"
+name = "host"
+
+[[items]]
+section = "smtp"
+name = "local_hostname"
+
+[[items]]
+section = "smtp"
+name = "password"
+
+[[items]]
+section = "smtp"
+name = "port"
+default-type = "dynamic"
+
+[[items]]
+section = "smtp"
+name = "tls"
+default = "none"
+
+[[items]]
+section = "smtp"
+name = "username"
+
+[[items]]
+section = "sparse"
+name = "missingwarning"
+default = true
+experimental = true
+
+[[items]]
+section = "storage"
+name = "dirstate-v2.slow-path"
+default = "abort"
+experimental = true  # experimental as long as format.use-dirstate-v2 is.
+
+[[items]]
+section = "storage"
+name = "new-repo-backend"
+default = "revlogv1"
+experimental = true
+
+[[items]]
+section = "storage"
+name = "revlog.delta-parent-search.candidate-group-chunk-size"
+default = 20
+
+[[items]]
+section = "storage"
+name = "revlog.issue6528.fix-incoming"
+default = true
+
+[[items]]
+section = "storage"
+name = "revlog.optimize-delta-parent-choice"
+default = true
+alias = [["format", "aggressivemergedeltas"]]
+
+[[items]]
+section = "storage"
+name = "revlog.persistent-nodemap.mmap"
+default = true
+
+[[items]]
+section = "storage"
+name = "revlog.persistent-nodemap.slow-path"
+default = "abort"
+
+[[items]]
+section = "storage"
+name = "revlog.reuse-external-delta"
+default = true
+
+[[items]]
+section = "storage"
+name = "revlog.reuse-external-delta-parent"
+documentation = """This option is true unless `format.generaldelta` is set."""
+
+[[items]]
+section = "storage"
+name = "revlog.zlib.level"
+
+[[items]]
+section = "storage"
+name = "revlog.zstd.level"
+
+[[items]]
+section = "subrepos"
+name = "allowed"
+default-type = "dynamic"  # to make backporting simpler
+
+[[items]]
+section = "subrepos"
+name = "git:allowed"
+default-type = "dynamic"
+
+[[items]]
+section = "subrepos"
+name = "hg:allowed"
+default-type = "dynamic"
+
+[[items]]
+section = "subrepos"
+name = "svn:allowed"
+default-type = "dynamic"
+
+[[items]]
+section = "templateconfig"
+name = ".*"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "templates"
+name = ".*"
+generic = true
+
+[[items]]
+section = "trusted"
+name = "groups"
+default-type = "list_type"
+
+[[items]]
+section = "trusted"
+name = "users"
+default-type = "list_type"
+
+[[items]]
+section = "ui"
+name = "_usedassubrepo"
+default = false
+
+[[items]]
+section = "ui"
+name = "allowemptycommit"
+default = false
+
+[[items]]
+section = "ui"
+name = "archivemeta"
+default = true
+
+[[items]]
+section = "ui"
+name = "askusername"
+default = false
+
+[[items]]
+section = "ui"
+name = "available-memory"
+
+[[items]]
+section = "ui"
+name = "clonebundlefallback"
+default = false
+
+[[items]]
+section = "ui"
+name = "clonebundleprefers"
+default-type = "list_type"
+
+[[items]]
+section = "ui"
+name = "clonebundles"
+default = true
+
+[[items]]
+section = "ui"
+name = "color"
+default = "auto"
+
+[[items]]
+section = "ui"
+name = "commitsubrepos"
+default = false
+
+[[items]]
+section = "ui"
+name = "debug"
+default = false
+
+[[items]]
+section = "ui"
+name = "debugger"
+
+[[items]]
+section = "ui"
+name = "detailed-exit-code"
+default = false
+experimental = true
+
+[[items]]
+section = "ui"
+name = "editor"
+default-type = "dynamic"
+
+[[items]]
+section = "ui"
+name = "fallbackencoding"
+
+[[items]]
+section = "ui"
+name = "forcecwd"
+
+[[items]]
+section = "ui"
+name = "forcemerge"
+
+[[items]]
+section = "ui"
+name = "formatdebug"
+default = false
+
+[[items]]
+section = "ui"
+name = "formatjson"
+default = false
+
+[[items]]
+section = "ui"
+name = "formatted"
+
+[[items]]
+section = "ui"
+name = "interactive"
+
+[[items]]
+section = "ui"
+name = "interface"
+
+[[items]]
+section = "ui"
+name = "interface.chunkselector"
+
+[[items]]
+section = "ui"
+name = "large-file-limit"
+default = 10485760
+
+[[items]]
+section = "ui"
+name = "logblockedtimes"
+default = false
+
+[[items]]
+section = "ui"
+name = "merge"
+
+[[items]]
+section = "ui"
+name = "mergemarkers"
+default = "basic"
+
+[[items]]
+section = "ui"
+name = "message-output"
+default = "stdio"
+
+[[items]]
+section = "ui"
+name = "nontty"
+default = false
+
+[[items]]
+section = "ui"
+name = "origbackuppath"
+
+[[items]]
+section = "ui"
+name = "paginate"
+default = true
+
+[[items]]
+section = "ui"
+name = "patch"
+
+[[items]]
+section = "ui"
+name = "portablefilenames"
+default = "warn"
+
+[[items]]
+section = "ui"
+name = "promptecho"
+default = false
+
+[[items]]
+section = "ui"
+name = "quiet"
+default = false
+
+[[items]]
+section = "ui"
+name = "quietbookmarkmove"
+default = false
+
+[[items]]
+section = "ui"
+name = "relative-paths"
+default = "legacy"
+
+[[items]]
+section = "ui"
+name = "remotecmd"
+default = "hg"
+
+[[items]]
+section = "ui"
+name = "report_untrusted"
+default = true
+
+[[items]]
+section = "ui"
+name = "rollback"
+default = true
+
+[[items]]
+section = "ui"
+name = "signal-safe-lock"
+default = true
+
+[[items]]
+section = "ui"
+name = "slash"
+default = false
+
+[[items]]
+section = "ui"
+name = "ssh"
+default = "ssh"
+
+[[items]]
+section = "ui"
+name = "ssherrorhint"
+
+[[items]]
+section = "ui"
+name = "statuscopies"
+default = false
+
+[[items]]
+section = "ui"
+name = "strict"
+default = false
+
+[[items]]
+section = "ui"
+name = "style"
+default = ""
+
+[[items]]
+section = "ui"
+name = "supportcontact"
+
+[[items]]
+section = "ui"
+name = "textwidth"
+default = 78
+
+[[items]]
+section = "ui"
+name = "timeout"
+default = "600"
+
+[[items]]
+section = "ui"
+name = "timeout.warn"
+default = 0
+
+[[items]]
+section = "ui"
+name = "timestamp-output"
+default = false
+
+[[items]]
+section = "ui"
+name = "traceback"
+default = false
+
+[[items]]
+section = "ui"
+name = "tweakdefaults"
+default = false
+
+[[items]]
+section = "ui"
+name = "username"
+alias = [["ui", "user"]]
+
+[[items]]
+section = "ui"
+name = "verbose"
+default = false
+
+[[items]]
+section = "verify"
+name = "skipflags"
+default = 0
+
+[[items]]
+section = "web"
+name = "accesslog"
+default = "-"
+
+[[items]]
+section = "web"
+name = "address"
+default = ""
+
+[[items]]
+section = "web"
+name = "allow-archive"
+default-type = "list_type"
+alias = [["web", "allow_archive"]]
+
+[[items]]
+section = "web"
+name = "allow-pull"
+default = true
+alias = [["web", "allowpull"]]
+
+[[items]]
+section = "web"
+name = "allow-push"
+default-type = "list_type"
+alias = [["web", "allow_push"]]
+
+[[items]]
+section = "web"
+name = "allow_read"
+default-type = "list_type"
+
+[[items]]
+section = "web"
+name = "allowbz2"
+default = false
+
+[[items]]
+section = "web"
+name = "allowgz"
+default = false
+
+[[items]]
+section = "web"
+name = "allowzip"
+default = false
+
+[[items]]
+section = "web"
+name = "archivesubrepos"
+default = false
+
+[[items]]
+section = "web"
+name = "baseurl"
+
+[[items]]
+section = "web"
+name = "cacerts"
+
+[[items]]
+section = "web"
+name = "cache"
+default = true
+
+[[items]]
+section = "web"
+name = "certificate"
+
+[[items]]
+section = "web"
+name = "collapse"
+default = false
+
+[[items]]
+section = "web"
+name = "comparisoncontext"
+default = 5
+
+[[items]]
+section = "web"
+name = "contact"
+
+[[items]]
+section = "web"
+name = "csp"
+
+[[items]]
+section = "web"
+name = "deny_push"
+default-type = "list_type"
+
+[[items]]
+section = "web"
+name = "deny_read"
+default-type = "list_type"
+
+[[items]]
+section = "web"
+name = "descend"
+default = true
+
+[[items]]
+section = "web"
+name = "description"
+default = ""
+
+[[items]]
+section = "web"
+name = "encoding"
+default-type = "lazy_module"
+default = "encoding.encoding"
+
+[[items]]
+section = "web"
+name = "errorlog"
+default = "-"
+
+[[items]]
+section = "web"
+name = "guessmime"
+default = false
+
+[[items]]
+section = "web"
+name = "hidden"
+default = false
+
+[[items]]
+section = "web"
+name = "ipv6"
+default = false
+
+[[items]]
+section = "web"
+name = "labels"
+default-type = "list_type"
+
+[[items]]
+section = "web"
+name = "logoimg"
+default = "hglogo.png"
+
+[[items]]
+section = "web"
+name = "logourl"
+default = "https://mercurial-scm.org/"
+
+[[items]]
+section = "web"
+name = "maxchanges"
+default = 10
+
+[[items]]
+section = "web"
+name = "maxfiles"
+default = 10
+
+[[items]]
+section = "web"
+name = "maxshortchanges"
+default = 60
+
+[[items]]
+section = "web"
+name = "motd"
+default = ""
+
+[[items]]
+section = "web"
+name = "name"
+default-type = "dynamic"
+
+[[items]]
+section = "web"
+name = "port"
+default = 8000
+
+[[items]]
+section = "web"
+name = "prefix"
+default = ""
+
+[[items]]
+section = "web"
+name = "push_ssl"
+default = true
+
+[[items]]
+section = "web"
+name = "refreshinterval"
+default = 20
+
+[[items]]
+section = "web"
+name = "server-header"
+
+[[items]]
+section = "web"
+name = "static"
+
+[[items]]
+section = "web"
+name = "staticurl"
+
+[[items]]
+section = "web"
+name = "stripes"
+default = 1
+
+[[items]]
+section = "web"
+name = "style"
+default = "paper"
+
+[[items]]
+section = "web"
+name = "templates"
+
+[[items]]
+section = "web"
+name = "view"
+default = "served"
+experimental = true
+
+[[items]]
+section = "worker"
+name = "backgroundclose"
+default-type = "dynamic"
+
+[[items]]
+section = "worker"
+name = "backgroundclosemaxqueue"
+# Windows defaults to a limit of 512 open files. A buffer of 128
+# should give us enough headway (512 - 128 = 384).
+default = 384
+
+[[items]]
+section = "worker"
+name = "backgroundcloseminfilecount"
+default = 2048
+
+[[items]]
+section = "worker"
+name = "backgroundclosethreadcount"
+default = 4
+
+[[items]]
+section = "worker"
+name = "enabled"
+default = true
+
+[[items]]
+section = "worker"
+name = "numcpus"
+
+# Templates and template applications
+
+[[template-applications]]
+template = "diff-options"
+section = "annotate"
+
+[[template-applications]]
+template = "diff-options"
+section = "commands"
+prefix = "commit.interactive"
+
+[[template-applications]]
+template = "diff-options"
+section = "commands"
+prefix = "revert.interactive"
+
+[[template-applications]]
+template = "diff-options"
+section = "diff"
+
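+# For example, applying the "diff-options" template to section "commands"
+# with prefix "commit.interactive" declares one configuration item per
+# suffix defined below, such as "commands.commit.interactive.nodates" and
+# "commands.commit.interactive.showfunc".
+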
+[templates]
+[[templates.diff-options]]
+suffix = "nodates"
+default = false
+
+[[templates.diff-options]]
+suffix = "showfunc"
+default = false
+
+[[templates.diff-options]]
+suffix = "unified"
+
+[[templates.diff-options]]
+suffix = "git"
+default = false
+
+[[templates.diff-options]]
+suffix = "ignorews"
+default = false
+
+[[templates.diff-options]]
+suffix = "ignorewsamount"
+default = false
+
+[[templates.diff-options]]
+suffix = "ignoreblanklines"
+default = false
+
+[[templates.diff-options]]
+suffix = "ignorewseol"
+default = false
+
+[[templates.diff-options]]
+suffix = "nobinary"
+default = false
+
+[[templates.diff-options]]
+suffix = "noprefix"
+default = false
+
+[[templates.diff-options]]
+suffix = "word-diff"
+default = false
+
+# In-core extensions
+
+[[items]]
+section = "blackbox"
+name = "debug.to-stderr"
+default = false
+in_core_extension = "blackbox"
+
+[[items]]
+section = "blackbox"
+name = "dirty"
+default = false
+in_core_extension = "blackbox"
+
+[[items]]
+section = "blackbox"
+name = "maxsize"
+default = "1 MB"
+in_core_extension = "blackbox"
+
+[[items]]
+section = "blackbox"
+name = "logsource"
+default = false
+in_core_extension = "blackbox"
+
+[[items]]
+section = "blackbox"
+name = "maxfiles"
+default = 7
+in_core_extension = "blackbox"
+
+[[items]]
+section = "blackbox"
+name = "track"
+default-type = "lambda"
+default = ["*"]
+in_core_extension = "blackbox"
+
+[[items]]
+section = "blackbox"
+name = "ignore"
+default-type = "lambda"
+default = ["chgserver", "cmdserver", "extension"]
+in_core_extension = "blackbox"
+
+[[items]]
+section = "blackbox"
+name = "date-format"
+default = ""
+in_core_extension = "blackbox"
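+
+# Illustrative hgrc sketch (values hypothetical) overriding the blackbox
+# defaults declared above:
+#
+#   [blackbox]
+#   dirty = true
+#   maxsize = 2 MB
+#   maxfiles = 5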
--- a/mercurial/context.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/context.py	Tue Nov 07 15:21:11 2023 +0100
@@ -16,9 +16,6 @@
     nullrev,
     short,
 )
-from .pycompat import (
-    getattr,
-)
 from . import (
     dagop,
     encoding,
--- a/mercurial/crecord.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/crecord.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,6 @@
 
 from .i18n import _
 from .pycompat import (
-    getattr,
     open,
 )
 from . import (
@@ -573,7 +572,7 @@
     ui.write(_(b'starting interactive selection\n'))
     chunkselector = curseschunkselector(headerlist, ui, operation)
     origsigtstp = sentinel = object()
-    if util.safehasattr(signal, 'SIGTSTP'):
+    if hasattr(signal, 'SIGTSTP'):
         origsigtstp = signal.getsignal(signal.SIGTSTP)
     try:
         with util.with_lc_ctype():
@@ -1944,7 +1943,7 @@
         """
 
         origsigwinch = sentinel = object()
-        if util.safehasattr(signal, 'SIGWINCH'):
+        if hasattr(signal, 'SIGWINCH'):
             origsigwinch = signal.signal(signal.SIGWINCH, self.sigwinchhandler)
         try:
             return self._main(stdscr)
--- a/mercurial/debugcommands.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/debugcommands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -33,7 +33,6 @@
     short,
 )
 from .pycompat import (
-    getattr,
     open,
 )
 from . import (
@@ -106,9 +105,7 @@
 )
 
 from .revlogutils import (
-    constants as revlog_constants,
     debug as revlog_debug,
-    deltas as deltautil,
     nodemap,
     rewrite,
     sidedata,
@@ -395,7 +392,6 @@
 
 def _debugobsmarkers(ui, part, indent=0, **opts):
     """display version and markers contained in 'data'"""
-    opts = pycompat.byteskwargs(opts)
     data = part.read()
     indent_string = b' ' * indent
     try:
@@ -408,7 +404,7 @@
         msg = b"%sversion: %d (%d bytes)\n"
         msg %= indent_string, version, len(data)
         ui.write(msg)
-        fm = ui.formatter(b'debugobsolete', opts)
+        fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
         for rawmarker in sorted(markers):
             m = obsutil.marker(None, rawmarker)
             fm.startitem()
@@ -486,8 +482,7 @@
 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
 def debugcapabilities(ui, path, **opts):
     """lists the capabilities of a remote peer"""
-    opts = pycompat.byteskwargs(opts)
-    peer = hg.peer(ui, opts, path)
+    peer = hg.peer(ui, pycompat.byteskwargs(opts), path)
     try:
         caps = peer.capabilities()
         ui.writenoi18n(b'Main capabilities:\n')
@@ -712,8 +707,7 @@
 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
 def debugdata(ui, repo, file_, rev=None, **opts):
     """dump the contents of a data file revision"""
-    opts = pycompat.byteskwargs(opts)
-    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
+    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
         if rev is not None:
             raise error.InputError(
                 _(b'cannot specify a revision with other arguments')
@@ -721,7 +715,9 @@
         file_, rev = None, file_
     elif rev is None:
         raise error.InputError(_(b'please specify a revision'))
-    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
+    r = cmdutil.openstorage(
+        repo, b'debugdata', file_, pycompat.byteskwargs(opts)
+    )
     try:
         ui.write(r.rawdata(r.lookup(rev)))
     except KeyError:
@@ -750,7 +746,40 @@
 
 @command(
     b'debugdeltachain',
-    cmdutil.debugrevlogopts + cmdutil.formatteropts,
+    [
+        (
+            b'r',
+            b'rev',
+            [],
+            _('restrict processing to these revlog revisions'),
+        ),
+        (
+            b'',
+            b'all-info',
+            False,
+            _('compute all information unless specified otherwise'),
+        ),
+        (
+            b'',
+            b'size-info',
+            None,
+            _('compute information related to deltas size'),
+        ),
+        (
+            b'',
+            b'dist-info',
+            None,
+            _('compute information related to base distance'),
+        ),
+        (
+            b'',
+            b'sparse-info',
+            None,
+            _('compute information related to sparse read'),
+        ),
+    ]
+    + cmdutil.debugrevlogopts
+    + cmdutil.formatteropts,
     _(b'-c|-m|FILE'),
     optionalrepo=True,
 )
@@ -762,8 +791,10 @@
     :``rev``:       revision number
     :``p1``:        parent 1 revision number (for reference)
     :``p2``:        parent 2 revision number (for reference)
+
     :``chainid``:   delta chain identifier (numbered by unique base)
     :``chainlen``:  delta chain length to this revision
+
     :``prevrev``:   previous revision in delta chain
     :``deltatype``: role of delta / how it was computed
                     - base:  a full snapshot
@@ -776,11 +807,13 @@
                              (when p2 has empty delta)
                     - prev:  a delta against the previous revision
                     - other: a delta against an arbitrary revision
+
     :``compsize``:  compressed size of revision
     :``uncompsize``: uncompressed size of revision
     :``chainsize``: total size of compressed revisions in chain
     :``chainratio``: total chain size divided by uncompressed revision size
                     (new delta chains typically start at ratio 2.00)
+
     :``lindist``:   linear distance from base revision in delta chain to end
                     of this revision
     :``extradist``: total size of revisions not part of this delta chain from
@@ -799,201 +832,82 @@
     :``readdensity``:  density of useful bytes in the data read from the disk
     :``srchunks``:  in how many data hunks the whole revision would be read
 
+    It is possible to select the information to be computed; this can provide
+    a noticeable speedup to the command in some cases.
+
+    Always computed:
+
+    - ``rev``
+    - ``p1``
+    - ``p2``
+    - ``chainid``
+    - ``chainlen``
+    - ``prevrev``
+    - ``deltatype``
+
+    Computed with --size-info (or --all-info):
+
+    - ``compsize``
+    - ``uncompsize``
+    - ``chainsize``
+    - ``chainratio``
+
+    Computed with --dist-info (or --all-info):
+
+    - ``lindist``
+    - ``extradist``
+    - ``extraratio``
+
+    Computed with --sparse-info (or --all-info):
+
+    - ``readsize``
+    - ``largestblock``
+    - ``readdensity``
+    - ``srchunks``
+
+    --
+
     The sparse read can be enabled with experimental.sparse-read = True
     """
-    opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
-    index = r.index
-    start = r.start
-    length = r.length
-    generaldelta = r._generaldelta
-    withsparseread = getattr(r, '_withsparseread', False)
-
-    # security to avoid crash on corrupted revlogs
-    total_revs = len(index)
-
-    chain_size_cache = {}
-
-    def revinfo(rev):
-        e = index[rev]
-        compsize = e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
-        uncompsize = e[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH]
-
-        base = e[revlog_constants.ENTRY_DELTA_BASE]
-        p1 = e[revlog_constants.ENTRY_PARENT_1]
-        p2 = e[revlog_constants.ENTRY_PARENT_2]
-
-        # If the parents of a revision has an empty delta, we never try to delta
-        # against that parent, but directly against the delta base of that
-        # parent (recursively). It avoids adding a useless entry in the chain.
-        #
-        # However we need to detect that as a special case for delta-type, that
-        # is not simply "other".
-        p1_base = p1
-        if p1 != nullrev and p1 < total_revs:
-            e1 = index[p1]
-            while e1[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
-                new_base = e1[revlog_constants.ENTRY_DELTA_BASE]
-                if (
-                    new_base == p1_base
-                    or new_base == nullrev
-                    or new_base >= total_revs
-                ):
-                    break
-                p1_base = new_base
-                e1 = index[p1_base]
-        p2_base = p2
-        if p2 != nullrev and p2 < total_revs:
-            e2 = index[p2]
-            while e2[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
-                new_base = e2[revlog_constants.ENTRY_DELTA_BASE]
-                if (
-                    new_base == p2_base
-                    or new_base == nullrev
-                    or new_base >= total_revs
-                ):
-                    break
-                p2_base = new_base
-                e2 = index[p2_base]
-
-        if generaldelta:
-            if base == p1:
-                deltatype = b'p1'
-            elif base == p2:
-                deltatype = b'p2'
-            elif base == rev:
-                deltatype = b'base'
-            elif base == p1_base:
-                deltatype = b'skip1'
-            elif base == p2_base:
-                deltatype = b'skip2'
-            elif r.issnapshot(rev):
-                deltatype = b'snap'
-            elif base == rev - 1:
-                deltatype = b'prev'
-            else:
-                deltatype = b'other'
-        else:
-            if base == rev:
-                deltatype = b'base'
-            else:
-                deltatype = b'prev'
-
-        chain = r._deltachain(rev)[0]
-        chain_size = 0
-        for iter_rev in reversed(chain):
-            cached = chain_size_cache.get(iter_rev)
-            if cached is not None:
-                chain_size += cached
-                break
-            e = index[iter_rev]
-            chain_size += e[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH]
-        chain_size_cache[rev] = chain_size
-
-        return p1, p2, compsize, uncompsize, deltatype, chain, chain_size
-
-    fm = ui.formatter(b'debugdeltachain', opts)
-
-    fm.plain(
-        b'    rev      p1      p2  chain# chainlen     prev   delta       '
-        b'size    rawsize  chainsize     ratio   lindist extradist '
-        b'extraratio'
+    revs = None
+    revs_opt = opts.pop('rev', [])
+    if revs_opt:
+        revs = [int(r) for r in revs_opt]
+
+    # each tri-state flag inherits from --all-info when left unspecified
+    all_info = opts.pop('all_info', False)
+    size_info = opts.pop('size_info', None)
+    if size_info is None:
+        size_info = all_info
+    dist_info = opts.pop('dist_info', None)
+    if dist_info is None:
+        dist_info = all_info
+    sparse_info = opts.pop('sparse_info', None)
+    if sparse_info is None:
+        sparse_info = all_info
+
+    revlog = cmdutil.openrevlog(
+        repo, b'debugdeltachain', file_, pycompat.byteskwargs(opts)
     )
-    if withsparseread:
-        fm.plain(b'   readsize largestblk rddensity srchunks')
-    fm.plain(b'\n')
-
-    chainbases = {}
-    for rev in r:
-        p1, p2, comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
-        chainbase = chain[0]
-        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
-        basestart = start(chainbase)
-        revstart = start(rev)
-        lineardist = revstart + comp - basestart
-        extradist = lineardist - chainsize
-        try:
-            prevrev = chain[-2]
-        except IndexError:
-            prevrev = -1
-
-        if uncomp != 0:
-            chainratio = float(chainsize) / float(uncomp)
-        else:
-            chainratio = chainsize
-
-        if chainsize != 0:
-            extraratio = float(extradist) / float(chainsize)
-        else:
-            extraratio = extradist
-
+    fm = ui.formatter(b'debugdeltachain', pycompat.byteskwargs(opts))
+
+    lines = revlog_debug.debug_delta_chain(
+        revlog,
+        revs=revs,
+        size_info=size_info,
+        dist_info=dist_info,
+        sparse_info=sparse_info,
+    )
+    # first entry is the header
+    header = next(lines)
+    fm.plain(header)
+    for entry in lines:
+        # each entry is a sequence of (label, format, key, value) tuples,
+        # one per column actually computed for that revision
+        label = b' '.join(e[0] for e in entry)
+        format = b' '.join(e[1] for e in entry)
+        values = [e[3] for e in entry]
+        data = dict((e[2], e[3]) for e in entry)
         fm.startitem()
-        fm.write(
-            b'rev p1 p2 chainid chainlen prevrev deltatype compsize '
-            b'uncompsize chainsize chainratio lindist extradist '
-            b'extraratio',
-            b'%7d %7d %7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
-            rev,
-            p1,
-            p2,
-            chainid,
-            len(chain),
-            prevrev,
-            deltatype,
-            comp,
-            uncomp,
-            chainsize,
-            chainratio,
-            lineardist,
-            extradist,
-            extraratio,
-            rev=rev,
-            chainid=chainid,
-            chainlen=len(chain),
-            prevrev=prevrev,
-            deltatype=deltatype,
-            compsize=comp,
-            uncompsize=uncomp,
-            chainsize=chainsize,
-            chainratio=chainratio,
-            lindist=lineardist,
-            extradist=extradist,
-            extraratio=extraratio,
-        )
-        if withsparseread:
-            readsize = 0
-            largestblock = 0
-            srchunks = 0
-
-            for revschunk in deltautil.slicechunk(r, chain):
-                srchunks += 1
-                blkend = start(revschunk[-1]) + length(revschunk[-1])
-                blksize = blkend - start(revschunk[0])
-
-                readsize += blksize
-                if largestblock < blksize:
-                    largestblock = blksize
-
-            if readsize:
-                readdensity = float(chainsize) / float(readsize)
-            else:
-                readdensity = 1
-
-            fm.write(
-                b'readsize largestblock readdensity srchunks',
-                b' %10d %10d %9.5f %8d',
-                readsize,
-                largestblock,
-                readdensity,
-                srchunks,
-                readsize=readsize,
-                largestblock=largestblock,
-                readdensity=readdensity,
-                srchunks=srchunks,
-            )
-
+        fm.write(label, format, *values, **data)
         fm.plain(b'\n')
-
     fm.end()
 
 
@@ -1027,7 +941,6 @@
 
     note: the process is initiated from a full text of the revision to store.
     """
-    opts = pycompat.byteskwargs(opts)
     if arg_2 is None:
         file_ = None
         rev = arg_1
@@ -1037,7 +950,9 @@
 
     rev = int(rev)
 
-    revlog = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
+    revlog = cmdutil.openrevlog(
+        repo, b'debugdeltachain', file_, pycompat.byteskwargs(opts)
+    )
     p1r, p2r = revlog.parentrevs(rev)
 
     if source == b'full':
@@ -1234,22 +1149,21 @@
 
       Control the initial size of the discovery for initial change
     """
-    opts = pycompat.byteskwargs(opts)
     unfi = repo.unfiltered()
 
     # setup potential extra filtering
-    local_revs = opts[b"local_as_revs"]
-    remote_revs = opts[b"remote_as_revs"]
+    local_revs = opts["local_as_revs"]
+    remote_revs = opts["remote_as_revs"]
 
     # make sure tests are repeatable
-    random.seed(int(opts[b'seed']))
+    random.seed(int(opts['seed']))
 
     if not remote_revs:
         path = urlutil.get_unique_pull_path_obj(
             b'debugdiscovery', ui, remoteurl
         )
         branches = (path.branch, [])
-        remote = hg.peer(repo, opts, path)
+        remote = hg.peer(repo, pycompat.byteskwargs(opts), path)
         ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
     else:
         branches = (None, [])
@@ -1279,10 +1193,10 @@
         repo = repo.filtered(b'debug-discovery-local-filter')
 
     data = {}
-    if opts.get(b'old'):
+    if opts.get('old'):
 
         def doit(pushedrevs, remoteheads, remote=remote):
-            if not util.safehasattr(remote, 'branches'):
+            if not hasattr(remote, 'branches'):
                 # enable in-client legacy support
                 remote = localrepo.locallegacypeer(remote.local())
                 if remote_revs:
@@ -1292,7 +1206,7 @@
                 repo, remote, force=True, audit=data
             )
             common = set(common)
-            if not opts.get(b'nonheads'):
+            if not opts.get('nonheads'):
                 ui.writenoi18n(
                     b"unpruned common: %s\n"
                     % b" ".join(sorted(short(n) for n in common))
@@ -1321,9 +1235,9 @@
             return common, hds
 
     remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
-    localrevs = opts[b'rev']
-
-    fm = ui.formatter(b'debugdiscovery', opts)
+    localrevs = opts['rev']
+
+    fm = ui.formatter(b'debugdiscovery', pycompat.byteskwargs(opts))
     if fm.strict_format:
 
         @contextlib.contextmanager
@@ -1474,15 +1388,14 @@
 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
 def debugextensions(ui, repo, **opts):
     '''show information about active extensions'''
-    opts = pycompat.byteskwargs(opts)
     exts = extensions.extensions(ui)
     hgver = util.version()
-    fm = ui.formatter(b'debugextensions', opts)
+    fm = ui.formatter(b'debugextensions', pycompat.byteskwargs(opts))
     for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
         isinternal = extensions.ismoduleinternal(extmod)
         extsource = None
 
-        if util.safehasattr(extmod, '__file__'):
+        if hasattr(extmod, '__file__'):
             extsource = pycompat.fsencode(extmod.__file__)
         elif getattr(sys, 'oxidized', False):
             extsource = pycompat.sysexecutable
@@ -1571,8 +1484,8 @@
     from . import fileset
 
     fileset.symbols  # force import of fileset so we have predicates to optimize
-    opts = pycompat.byteskwargs(opts)
-    ctx = logcmdutil.revsingle(repo, opts.get(b'rev'), None)
+
+    ctx = logcmdutil.revsingle(repo, opts.get('rev'), None)
 
     stages = [
         (b'parsed', pycompat.identity),
@@ -1582,32 +1495,32 @@
     stagenames = {n for n, f in stages}
 
     showalways = set()
-    if ui.verbose and not opts[b'show_stage']:
+    if ui.verbose and not opts['show_stage']:
         # show parsed tree by --verbose (deprecated)
         showalways.add(b'parsed')
-    if opts[b'show_stage'] == [b'all']:
+    if opts['show_stage'] == [b'all']:
         showalways.update(stagenames)
     else:
-        for n in opts[b'show_stage']:
+        for n in opts['show_stage']:
             if n not in stagenames:
                 raise error.Abort(_(b'invalid stage name: %s') % n)
-        showalways.update(opts[b'show_stage'])
+        showalways.update(opts['show_stage'])
 
     tree = filesetlang.parse(expr)
     for n, f in stages:
         tree = f(tree)
         if n in showalways:
-            if opts[b'show_stage'] or n != b'parsed':
+            if opts['show_stage'] or n != b'parsed':
                 ui.write(b"* %s:\n" % n)
             ui.write(filesetlang.prettyformat(tree), b"\n")
 
     files = set()
-    if opts[b'all_files']:
+    if opts['all_files']:
         for r in repo:
             c = repo[r]
             files.update(c.files())
             files.update(c.substate)
-    if opts[b'all_files'] or ctx.rev() is None:
+    if opts['all_files'] or ctx.rev() is None:
         wctx = repo[None]
         files.update(
             repo.dirstate.walk(
@@ -1623,7 +1536,7 @@
         files.update(ctx.substate)
 
     m = ctx.matchfileset(repo.getcwd(), expr)
-    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
+    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
         ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
     for f in sorted(files):
         if not m(f):
@@ -1711,18 +1624,17 @@
 
     Use --verbose to get extra information about current config value and
     Mercurial default."""
-    opts = pycompat.byteskwargs(opts)
     maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
     maxvariantlength = max(len(b'format-variant'), maxvariantlength)
 
     def makeformatname(name):
         return b'%s:' + (b' ' * (maxvariantlength - len(name)))
 
-    fm = ui.formatter(b'debugformat', opts)
+    fm = ui.formatter(b'debugformat', pycompat.byteskwargs(opts))
     if fm.isplain():
 
         def formatvalue(value):
-            if util.safehasattr(value, 'startswith'):
+            if hasattr(value, 'startswith'):
                 return value
             if value:
                 return b'yes'
@@ -1823,8 +1735,7 @@
     Every ID must be a full-length hex node id string. Saves the bundle to the
     given file.
     """
-    opts = pycompat.byteskwargs(opts)
-    repo = hg.peer(ui, opts, repopath)
+    repo = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
     if not repo.capable(b'getbundle'):
         raise error.Abort(b"getbundle() not supported by target repository")
     args = {}
@@ -1836,7 +1747,7 @@
     args['bundlecaps'] = None
     bundle = repo.getbundle(b'debug', **args)
 
-    bundletype = opts.get(b'type', b'bzip2').lower()
+    bundletype = opts.get('type', b'bzip2').lower()
     btypes = {
         b'none': b'HG10UN',
         b'bzip2': b'HG10BZ',
@@ -1930,8 +1841,9 @@
 )
 def debugindexdot(ui, repo, file_=None, **opts):
     """dump an index DAG as a graphviz dot file"""
-    opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
+    r = cmdutil.openstorage(
+        repo, b'debugindexdot', file_, pycompat.byteskwargs(opts)
+    )
     ui.writenoi18n(b"digraph G {\n")
     for i in r:
         node = r.node(i)
@@ -1947,7 +1859,7 @@
     """show stats related to the changelog index"""
     repo.changelog.shortest(repo.nullid, 1)
     index = repo.changelog.index
-    if not util.safehasattr(index, 'stats'):
+    if not hasattr(index, 'stats'):
         raise error.Abort(_(b'debugindexstats only works with native code'))
     for k, v in sorted(index.stats().items()):
         ui.write(b'%s: %d\n' % (k, v))
@@ -1959,11 +1871,9 @@
 
     Returns 0 on success.
     """
-    opts = pycompat.byteskwargs(opts)
-
     problems = 0
 
-    fm = ui.formatter(b'debuginstall', opts)
+    fm = ui.formatter(b'debuginstall', pycompat.byteskwargs(opts))
     fm.startitem()
 
     # encoding might be unknown or wrong. don't translate these messages.
@@ -1983,7 +1893,7 @@
 
     # Python
     pythonlib = None
-    if util.safehasattr(os, '__file__'):
+    if hasattr(os, '__file__'):
         pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
     elif getattr(sys, 'oxidized', False):
         pythonlib = pycompat.sysexecutable
@@ -2065,7 +1975,7 @@
 
     # compiled modules
     hgmodules = None
-    if util.safehasattr(sys.modules[__name__], '__file__'):
+    if hasattr(sys.modules[__name__], '__file__'):
         hgmodules = os.path.dirname(pycompat.fsencode(__file__))
     elif getattr(sys, 'oxidized', False):
         hgmodules = pycompat.sysexecutable
@@ -2260,8 +2170,7 @@
     Every ID must be a full-length hex node id string. Returns a list of 0s
     and 1s indicating unknown/known.
     """
-    opts = pycompat.byteskwargs(opts)
-    repo = hg.peer(ui, opts, repopath)
+    repo = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
     if not repo.capable(b'known'):
         raise error.Abort(b"known() not supported by target repository")
     flags = repo.known([bin(s) for s in ids])
@@ -2496,9 +2405,8 @@
         else:
             ui.writenoi18n(b'v1 and v2 states mismatch: using v1\n')
 
-    opts = pycompat.byteskwargs(opts)
-    if not opts[b'template']:
-        opts[b'template'] = (
+    if not opts['template']:
+        opts['template'] = (
             b'{if(commits, "", "no merge state found\n")}'
             b'{commits % "{name}{if(label, " ({label})")}: {node}\n"}'
             b'{files % "file: {path} (state \\"{state}\\")\n'
@@ -2518,7 +2426,7 @@
 
     ms = mergestatemod.mergestate.read(repo)
 
-    fm = ui.formatter(b'debugmergestate', opts)
+    fm = ui.formatter(b'debugmergestate', pycompat.byteskwargs(opts))
     fm.startitem()
 
     fm_commits = fm.nested(b'commits')
@@ -2649,7 +2557,7 @@
     if isinstance(r, (manifest.manifestrevlog, filelog.filelog)):
         r = r._revlog
     if opts['dump_new']:
-        if util.safehasattr(r.index, "nodemap_data_all"):
+        if hasattr(r.index, "nodemap_data_all"):
             data = r.index.nodemap_data_all()
         else:
             data = nodemap.persistent_data(r.index)
@@ -2706,8 +2614,6 @@
 
     With no arguments, displays the list of obsolescence markers."""
 
-    opts = pycompat.byteskwargs(opts)
-
     def parsenodeid(s):
         try:
             # We do not use revsingle/revrange functions here to accept
@@ -2723,9 +2629,9 @@
                 b'node identifiers'
             )
 
-    if opts.get(b'delete'):
+    if opts.get('delete'):
         indices = []
-        for v in opts.get(b'delete'):
+        for v in opts.get('delete'):
             try:
                 indices.append(int(v))
             except ValueError:
@@ -2746,25 +2652,25 @@
         return
 
     if precursor is not None:
-        if opts[b'rev']:
+        if opts['rev']:
             raise error.InputError(
                 b'cannot select revision when creating marker'
             )
         metadata = {}
-        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
+        metadata[b'user'] = encoding.fromlocal(opts['user'] or ui.username())
         succs = tuple(parsenodeid(succ) for succ in successors)
         l = repo.lock()
         try:
             tr = repo.transaction(b'debugobsolete')
             try:
-                date = opts.get(b'date')
+                date = opts.get('date')
                 if date:
                     date = dateutil.parsedate(date)
                 else:
                     date = None
                 prec = parsenodeid(precursor)
                 parents = None
-                if opts[b'record_parents']:
+                if opts['record_parents']:
                     if prec not in repo.unfiltered():
                         raise error.Abort(
                             b'cannot use --record-parents on '
@@ -2776,7 +2682,7 @@
                     tr,
                     prec,
                     succs,
-                    opts[b'flags'],
+                    opts['flags'],
                     parents=parents,
                     date=date,
                     metadata=metadata,
@@ -2792,12 +2698,12 @@
         finally:
             l.release()
     else:
-        if opts[b'rev']:
-            revs = logcmdutil.revrange(repo, opts[b'rev'])
+        if opts['rev']:
+            revs = logcmdutil.revrange(repo, opts['rev'])
             nodes = [repo[r].node() for r in revs]
             markers = list(
                 obsutil.getmarkers(
-                    repo, nodes=nodes, exclusive=opts[b'exclusive']
+                    repo, nodes=nodes, exclusive=opts['exclusive']
                 )
             )
             markers.sort(key=lambda x: x._data)
@@ -2806,12 +2712,12 @@
 
         markerstoiter = markers
         isrelevant = lambda m: True
-        if opts.get(b'rev') and opts.get(b'index'):
+        if opts.get('rev') and opts.get('index'):
             markerstoiter = obsutil.getmarkers(repo)
             markerset = set(markers)
             isrelevant = lambda m: m in markerset
 
-        fm = ui.formatter(b'debugobsolete', opts)
+        fm = ui.formatter(b'debugobsolete', pycompat.byteskwargs(opts))
         for i, m in enumerate(markerstoiter):
             if not isrelevant(m):
                 # marker can be irrelevant when we're iterating over a set
@@ -2823,7 +2729,7 @@
                 # are relevant to --rev value
                 continue
             fm.startitem()
-            ind = i if opts.get(b'index') else None
+            ind = i if opts.get('index') else None
             cmdutil.showmarker(fm, m, index=ind)
         fm.end()
 
@@ -2836,8 +2742,7 @@
 def debugp1copies(ui, repo, **opts):
     """dump copy information compared to p1"""
 
-    opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
+    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
     for dst, src in ctx.p1copies().items():
         ui.write(b'%s -> %s\n' % (src, dst))
 
@@ -2850,8 +2755,7 @@
 def debugp2copies(ui, repo, **opts):
     """dump copy information compared to p2"""
 
-    opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
+    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
     for dst, src in ctx.p2copies().items():
         ui.write(b'%s -> %s\n' % (src, dst))
 
@@ -3019,11 +2923,10 @@
     information, even with --debug. In such case, information above is
     useful to know why a merge tool is chosen.
     """
-    opts = pycompat.byteskwargs(opts)
     overrides = {}
-    if opts[b'tool']:
-        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
-        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
+    if opts['tool']:
+        overrides[(b'ui', b'forcemerge')] = opts['tool']
+        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts['tool'])))
 
     with ui.configoverride(overrides, b'debugmergepatterns'):
         hgmerge = encoding.environ.get(b"HGMERGE")
@@ -3033,9 +2936,9 @@
         if uimerge:
             ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
 
-        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
-        m = scmutil.match(ctx, pats, opts)
-        changedelete = opts[b'changedelete']
+        ctx = scmutil.revsingle(repo, opts.get('rev'))
+        m = scmutil.match(ctx, pats, pycompat.byteskwargs(opts))
+        changedelete = opts['changedelete']
         for path in ctx.walk(m):
             fctx = ctx[path]
             with ui.silent(
@@ -3184,8 +3087,7 @@
 )
 def debugrebuildfncache(ui, repo, **opts):
     """rebuild the fncache file"""
-    opts = pycompat.byteskwargs(opts)
-    repair.rebuildfncache(ui, repo, opts.get(b"only_data"))
+    repair.rebuildfncache(ui, repo, opts.get("only_data"))
 
 
 @command(
@@ -3196,9 +3098,8 @@
 def debugrename(ui, repo, *pats, **opts):
     """dump rename information"""
 
-    opts = pycompat.byteskwargs(opts)
-    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
-    m = scmutil.match(ctx, pats, opts)
+    ctx = scmutil.revsingle(repo, opts.get('rev'))
+    m = scmutil.match(ctx, pats, pycompat.byteskwargs(opts))
     for abs in ctx.walk(m):
         fctx = ctx[abs]
         o = fctx.filelog().renamed(fctx.filenode())
@@ -3224,10 +3125,11 @@
 )
 def debugrevlog(ui, repo, file_=None, **opts):
     """show data and statistics about a revlog"""
-    opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
-
-    if opts.get(b"dump"):
+    r = cmdutil.openrevlog(
+        repo, b'debugrevlog', file_, pycompat.byteskwargs(opts)
+    )
+
+    if opts.get("dump"):
         revlog_debug.dump(ui, r)
     else:
         revlog_debug.debug_revlog(ui, r)
@@ -3243,9 +3145,10 @@
 )
 def debugrevlogindex(ui, repo, file_=None, **opts):
     """dump the contents of a revlog index"""
-    opts = pycompat.byteskwargs(opts)
-    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
-    format = opts.get(b'format', 0)
+    r = cmdutil.openrevlog(
+        repo, b'debugrevlogindex', file_, pycompat.byteskwargs(opts)
+    )
+    format = opts.get('format', 0)
     if format not in (0, 1):
         raise error.Abort(_(b"unknown format %d") % format)
 
@@ -3394,7 +3297,6 @@
     Use --verify-optimized to compare the optimized result with the unoptimized
     one. Returns 1 if the optimized result differs.
     """
-    opts = pycompat.byteskwargs(opts)
     aliases = ui.configitems(b'revsetalias')
     stages = [
         (b'parsed', lambda tree: tree),
@@ -3406,9 +3308,9 @@
         (b'analyzed', revsetlang.analyze),
         (b'optimized', revsetlang.optimize),
     ]
-    if opts[b'no_optimized']:
+    if opts['no_optimized']:
         stages = stages[:-1]
-    if opts[b'verify_optimized'] and opts[b'no_optimized']:
+    if opts['verify_optimized'] and opts['no_optimized']:
         raise error.Abort(
             _(b'cannot use --verify-optimized with --no-optimized')
         )
@@ -3416,21 +3318,21 @@
 
     showalways = set()
     showchanged = set()
-    if ui.verbose and not opts[b'show_stage']:
+    if ui.verbose and not opts['show_stage']:
         # show parsed tree by --verbose (deprecated)
         showalways.add(b'parsed')
         showchanged.update([b'expanded', b'concatenated'])
-        if opts[b'optimize']:
+        if opts['optimize']:
             showalways.add(b'optimized')
-    if opts[b'show_stage'] and opts[b'optimize']:
+    if opts['show_stage'] and opts['optimize']:
         raise error.Abort(_(b'cannot use --optimize with --show-stage'))
-    if opts[b'show_stage'] == [b'all']:
+    if opts['show_stage'] == [b'all']:
         showalways.update(stagenames)
     else:
-        for n in opts[b'show_stage']:
+        for n in opts['show_stage']:
             if n not in stagenames:
                 raise error.Abort(_(b'invalid stage name: %s') % n)
-        showalways.update(opts[b'show_stage'])
+        showalways.update(opts['show_stage'])
 
     treebystage = {}
     printedtree = None
@@ -3438,15 +3340,15 @@
     for n, f in stages:
         treebystage[n] = tree = f(tree)
         if n in showalways or (n in showchanged and tree != printedtree):
-            if opts[b'show_stage'] or n != b'parsed':
+            if opts['show_stage'] or n != b'parsed':
                 ui.write(b"* %s:\n" % n)
             ui.write(revsetlang.prettyformat(tree), b"\n")
             printedtree = tree
 
-    if opts[b'verify_optimized']:
+    if opts['verify_optimized']:
         arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
         brevs = revset.makematcher(treebystage[b'optimized'])(repo)
-        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
+        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
             ui.writenoi18n(
                 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
             )
@@ -3474,9 +3376,9 @@
 
     func = revset.makematcher(tree)
     revs = func(repo)
-    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
+    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
         ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
-    if not opts[b'show_revs']:
+    if not opts['show_revs']:
         return
     for c in revs:
         ui.write(b"%d\n" % c)
@@ -3503,30 +3405,28 @@
     workaround to the fact that ``hg serve --stdio`` must have specific
     arguments for security reasons.
     """
-    opts = pycompat.byteskwargs(opts)
-
-    if not opts[b'sshstdio']:
+    if not opts['sshstdio']:
         raise error.Abort(_(b'only --sshstdio is currently supported'))
 
     logfh = None
 
-    if opts[b'logiofd'] and opts[b'logiofile']:
+    if opts['logiofd'] and opts['logiofile']:
         raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
 
-    if opts[b'logiofd']:
+    if opts['logiofd']:
         # Ideally we would be line buffered. But line buffering in binary
         # mode isn't supported and emits a warning in Python 3.8+. Disabling
         # buffering could have performance impacts. But since this isn't
         # performance critical code, it should be fine.
         try:
-            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 0)
+            logfh = os.fdopen(int(opts['logiofd']), 'ab', 0)
         except OSError as e:
             if e.errno != errno.ESPIPE:
                 raise
             # can't seek a pipe, so `ab` mode fails on py3
-            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 0)
-    elif opts[b'logiofile']:
-        logfh = open(opts[b'logiofile'], b'ab', 0)
+            logfh = os.fdopen(int(opts['logiofd']), 'wb', 0)
+    elif opts['logiofile']:
+        logfh = open(opts['logiofile'], b'ab', 0)
 
     s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
     s.serve_forever()
@@ -3566,8 +3466,7 @@
     """dump the side data for a cl/manifest/file revision
 
     Use --verbose to dump the sidedata content."""
-    opts = pycompat.byteskwargs(opts)
-    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
+    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
         if rev is not None:
             raise error.InputError(
                 _(b'cannot specify a revision with other arguments')
@@ -3575,7 +3474,9 @@
         file_, rev = None, file_
     elif rev is None:
         raise error.InputError(_(b'please specify a revision'))
-    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
+    r = cmdutil.openstorage(
+        repo, b'debugdata', file_, pycompat.byteskwargs(opts)
+    )
     r = getattr(r, '_revlog', r)
     try:
         sidedata = r.sidedata(r.lookup(rev))
@@ -3748,13 +3649,12 @@
     )
     backups.sort(key=lambda x: os.path.getmtime(x), reverse=True)
 
-    opts = pycompat.byteskwargs(opts)
-    opts[b"bundle"] = b""
-    opts[b"force"] = None
-    limit = logcmdutil.getlimit(opts)
+    opts["bundle"] = b""
+    opts["force"] = None
+    limit = logcmdutil.getlimit(pycompat.byteskwargs(opts))
 
     def display(other, chlist, displayer):
-        if opts.get(b"newest_first"):
+        if opts.get("newest_first"):
             chlist.reverse()
         count = 0
         for n in chlist:
@@ -3763,12 +3663,12 @@
             parents = [
                 True for p in other.changelog.parents(n) if p != repo.nullid
             ]
-            if opts.get(b"no_merges") and len(parents) == 2:
+            if opts.get("no_merges") and len(parents) == 2:
                 continue
             count += 1
             displayer.show(other[n])
 
-    recovernode = opts.get(b"recover")
+    recovernode = opts.get("recover")
     if recovernode:
         if scmutil.isrevsymbol(repo, recovernode):
             ui.warn(_(b"%s already exists in the repo\n") % recovernode)
@@ -3792,15 +3692,15 @@
             source,
         )
         try:
-            other = hg.peer(repo, opts, path)
+            other = hg.peer(repo, pycompat.byteskwargs(opts), path)
         except error.LookupError as ex:
             msg = _(b"\nwarning: unable to open bundle %s") % path.loc
             hint = _(b"\n(missing parent rev %s)\n") % short(ex.name)
             ui.warn(msg, hint=hint)
             continue
-        branches = (path.branch, opts.get(b'branch', []))
+        branches = (path.branch, opts.get('branch', []))
         revs, checkout = hg.addbranchrevs(
-            repo, other, branches, opts.get(b"rev")
+            repo, other, branches, opts.get("rev")
         )
 
         if revs:
@@ -3809,7 +3709,7 @@
         with ui.silent():
             try:
                 other, chlist, cleanupfn = bundlerepo.getremotechanges(
-                    ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+                    ui, repo, other, revs, opts["bundle"], opts["force"]
                 )
             except error.LookupError:
                 continue
@@ -3846,10 +3746,10 @@
                     ui.status(b"%s%s\n" % (b"bundle:".ljust(13), path.loc))
                 else:
                     opts[
-                        b"template"
+                        "template"
                     ] = b"{label('status.modified', node|short)} {desc|firstline}\n"
                 displayer = logcmdutil.changesetdisplayer(
-                    ui, other, opts, False
+                    ui, other, pycompat.byteskwargs(opts), False
                 )
                 display(other, chlist, displayer)
                 displayer.close()
@@ -3932,10 +3832,9 @@
 )
 def debug_revlog_stats(ui, repo, **opts):
     """display statistics about revlogs in the store"""
-    opts = pycompat.byteskwargs(opts)
-    changelog = opts[b"changelog"]
-    manifest = opts[b"manifest"]
-    filelogs = opts[b"filelogs"]
+    changelog = opts["changelog"]
+    manifest = opts["manifest"]
+    filelogs = opts["filelogs"]
 
     if changelog is None and manifest is None and filelogs is None:
         changelog = True
@@ -3943,7 +3842,7 @@
         filelogs = True
 
     repo = repo.unfiltered()
-    fm = ui.formatter(b'debug-revlog-stats', opts)
+    fm = ui.formatter(b'debug-revlog-stats', pycompat.byteskwargs(opts))
     revlog_debug.debug_revlog_stats(repo, fm, changelog, manifest, filelogs)
     fm.end()
 
@@ -4182,8 +4081,7 @@
 )
 def debugwalk(ui, repo, *pats, **opts):
     """show how files match on given patterns"""
-    opts = pycompat.byteskwargs(opts)
-    m = scmutil.match(repo[None], pats, opts)
+    m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
     if ui.verbose:
         ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
     items = list(repo[None].walk(m))
@@ -4236,16 +4134,15 @@
     norepo=True,
 )
 def debugwireargs(ui, repopath, *vals, **opts):
-    opts = pycompat.byteskwargs(opts)
-    repo = hg.peer(ui, opts, repopath)
+    repo = hg.peer(ui, pycompat.byteskwargs(opts), repopath)
     try:
         for opt in cmdutil.remoteopts:
-            del opts[opt[1]]
+            del opts[pycompat.sysstr(opt[1])]
         args = {}
         for k, v in opts.items():
             if v:
                 args[k] = v
-        args = pycompat.strkwargs(args)
+
         # run twice to check that we don't mess up the stream for the next command
         res1 = repo.debugwireargs(*vals, **args)
         res2 = repo.debugwireargs(*vals, **args)
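Note: option names in tables like `cmdutil.remoteopts` are still stored
as bytes, so they are converted with `pycompat.sysstr()` before being
used as keys of the str-keyed `opts` dict; this also makes the `**args`
expansion above valid without the former `pycompat.strkwargs()` pass. A
hedged sketch of the key conversion:

    from mercurial import pycompat

    opt_name = b'ssh'                # bytes, as stored in an options table
    key = pycompat.sysstr(opt_name)  # 'ssh', usable as a **kwargs key
    assert pycompat.sysbytes(key) == opt_name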
@@ -4501,12 +4398,10 @@
     resulting object is fed into a CBOR encoder. Otherwise it is interpreted
     as a Python byte string literal.
     """
-    opts = pycompat.byteskwargs(opts)
-
-    if opts[b'localssh'] and not repo:
+    if opts['localssh'] and not repo:
         raise error.Abort(_(b'--localssh requires a repository'))
 
-    if opts[b'peer'] and opts[b'peer'] not in (
+    if opts['peer'] and opts['peer'] not in (
         b'raw',
         b'ssh1',
     ):
@@ -4515,7 +4410,7 @@
             hint=_(b'valid values are "raw" and "ssh1"'),
         )
 
-    if path and opts[b'localssh']:
+    if path and opts['localssh']:
         raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
 
     if ui.interactive():
@@ -4529,7 +4424,7 @@
     stderr = None
     opener = None
 
-    if opts[b'localssh']:
+    if opts['localssh']:
         # We start the SSH server in its own process so there is process
         # separation. This prevents a whole class of potential bugs around
         # shared state from interfering with server operation.
@@ -4552,7 +4447,7 @@
         stderr = proc.stderr
 
         # We turn the pipes into observers so we can log I/O.
-        if ui.verbose or opts[b'peer'] == b'raw':
+        if ui.verbose or opts['peer'] == b'raw':
             stdin = util.makeloggingfileobject(
                 ui, proc.stdin, b'i', logdata=True
             )
@@ -4566,9 +4461,9 @@
         # --localssh also implies the peer connection settings.
 
         url = b'ssh://localserver'
-        autoreadstderr = not opts[b'noreadstderr']
-
-        if opts[b'peer'] == b'ssh1':
+        autoreadstderr = not opts['noreadstderr']
+
+        if opts['peer'] == b'ssh1':
             ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
             peer = sshpeer.sshv1peer(
                 ui,
@@ -4580,7 +4475,7 @@
                 None,
                 autoreadstderr=autoreadstderr,
             )
-        elif opts[b'peer'] == b'raw':
+        elif opts['peer'] == b'raw':
             ui.write(_(b'using raw connection to peer\n'))
             peer = None
         else:
@@ -4627,17 +4522,17 @@
         # Don't send default headers when in raw mode. This allows us to
         # bypass most of the behavior of our URL handling code so we can
         # have near complete control over what's sent on the wire.
-        if opts[b'peer'] == b'raw':
+        if opts['peer'] == b'raw':
             openerargs['sendaccept'] = False
 
         opener = urlmod.opener(ui, authinfo, **openerargs)
 
-        if opts[b'peer'] == b'raw':
+        if opts['peer'] == b'raw':
             ui.write(_(b'using raw connection to peer\n'))
             peer = None
-        elif opts[b'peer']:
+        elif opts['peer']:
             raise error.Abort(
-                _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
+                _(b'--peer %s not supported with HTTP peers') % opts['peer']
             )
         else:
             peer_path = urlutil.try_path(ui, path)
--- a/mercurial/dirstate.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/dirstate.py	Tue Nov 07 15:21:11 2023 +0100
@@ -13,7 +13,6 @@
 import uuid
 
 from .i18n import _
-from .pycompat import delattr
 
 from hgdemandimport import tracing
 
@@ -344,11 +343,52 @@
 
     @contextlib.contextmanager
     def changing_parents(self, repo):
+        """Wrap a dirstate change related to a change of working copy parents
+
+        This context scopes a series of dirstate modifications that match an
+        update of the working copy parents (typically `hg update`, `hg merge`,
+        etc.).
+
+        The dirstate's methods that perform this kind of modification require
+        this context to be present before being called.
+        Such methods are decorated with `@requires_changing_parents`.
+
+        The new dirstate contents will be written to disk when the top-most
+        `changing_parents` context exits successfully. If an exception is
+        raised during a `changing_parents` context of any level, all changes
+        are invalidated. If this context is open within an open transaction,
+        the dirstate writing is delayed until that transaction is successfully
+        committed (and the dirstate is invalidated on transaction abort).
+
+        The `changing_parents` operation is mutually exclusive with the
+        `changing_files` one.
+        """
         with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
             yield c
 
     @contextlib.contextmanager
     def changing_files(self, repo):
+        """Wrap a dirstate change related to the set of tracked files
+
+        This context scopes a series of dirstate modifications that change the
+        set of tracked files (typically `hg add`, `hg remove`, etc.) or some
+        dirstate-stored information (like `hg rename --after`) but preserve
+        the working copy parents.
+
+        The dirstate's methods that perform this kind of modification require
+        this context to be present before being called.
+        Such methods are decorated with `@requires_changing_files`.
+
+        The new dirstate contents will be written to disk when the top-most
+        `changing_files` context exits successfully. If an exception is raised
+        during a `changing_files` context of any level, all changes are
+        invalidated.  If this context is open within an open transaction, the
+        dirstate writing is delayed until that transaction is successfully
+        committed (and the dirstate is invalidated on transaction abort).
+
+        The `changing_files` operation is mutually exclusive with the
+        `changing_parents` one.
+        """
         with self._changing(repo, CHANGE_TYPE_FILES) as c:
             yield c
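Note: a minimal usage sketch for the two contexts documented above
(`repo` is assumed to be a local repository object; the bodies are
placeholders for calls to the decorated dirstate methods):

    # scope dirstate mutations to a change of working copy parents
    with repo.dirstate.changing_parents(repo):
        ...  # methods decorated with @requires_changing_parents

    # scope a change to the set of tracked files instead
    with repo.dirstate.changing_files(repo):
        ...  # methods decorated with @requires_changing_files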
 
--- a/mercurial/dirstatemap.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/dirstatemap.py	Tue Nov 07 15:21:11 2023 +0100
@@ -377,7 +377,7 @@
             return
 
         # TODO: adjust this estimate for dirstate-v2
-        if util.safehasattr(parsers, 'dict_new_presized'):
+        if hasattr(parsers, 'dict_new_presized'):
             # Make an estimate of the number of files in the dirstate based on
             # its size. This trades wasting some memory for avoiding costly
             # resizes. Each entry have a prefix of 17 bytes followed by one or
--- a/mercurial/dispatch.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/dispatch.py	Tue Nov 07 15:21:11 2023 +0100
@@ -18,7 +18,6 @@
 
 
 from .i18n import _
-from .pycompat import getattr
 
 from hgdemandimport import tracing
 
@@ -107,7 +106,7 @@
 def _flushstdio(ui, err):
     status = None
     # In all cases we try to flush stdio streams.
-    if util.safehasattr(ui, 'fout'):
+    if hasattr(ui, 'fout'):
         assert ui is not None  # help pytype
         assert ui.fout is not None  # help pytype
         try:
@@ -116,7 +115,7 @@
             err = e
             status = -1
 
-    if util.safehasattr(ui, 'ferr'):
+    if hasattr(ui, 'ferr'):
         assert ui is not None  # help pytype
         assert ui.ferr is not None  # help pytype
         try:
@@ -170,7 +169,7 @@
             "newline": "\n",
             "line_buffering": sys.stdout.line_buffering,
         }
-        if util.safehasattr(sys.stdout, "write_through"):
+        if hasattr(sys.stdout, "write_through"):
             # pytype: disable=attribute-error
             kwargs["write_through"] = sys.stdout.write_through
             # pytype: enable=attribute-error
@@ -183,7 +182,7 @@
             "newline": "\n",
             "line_buffering": sys.stderr.line_buffering,
         }
-        if util.safehasattr(sys.stderr, "write_through"):
+        if hasattr(sys.stderr, "write_through"):
             # pytype: disable=attribute-error
             kwargs["write_through"] = sys.stderr.write_through
             # pytype: enable=attribute-error
@@ -520,7 +519,7 @@
 def aliasargs(fn, givenargs):
     args = []
     # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
-    if not util.safehasattr(fn, '_origfunc'):
+    if not hasattr(fn, '_origfunc'):
         args = getattr(fn, 'args', args)
     if args:
         cmd = b' '.join(map(procutil.shellquote, args))
@@ -708,7 +707,7 @@
         }
         if name not in adefaults:
             raise AttributeError(name)
-        if self.badalias or util.safehasattr(self, 'shell'):
+        if self.badalias or hasattr(self, 'shell'):
             return adefaults[name]
         return getattr(self.fn, name)
 
@@ -734,7 +733,7 @@
             self.name,
             self.definition,
         )
-        if util.safehasattr(self, 'shell'):
+        if hasattr(self, 'shell'):
             return self.fn(ui, *args, **opts)
         else:
             try:
@@ -1024,7 +1023,7 @@
     cmd = aliases[0]
     fn = entry[0]
 
-    if cmd and util.safehasattr(fn, 'shell'):
+    if cmd and hasattr(fn, 'shell'):
         # shell alias shouldn't receive early options which are consumed by hg
         _earlyopts, args = _earlysplitopts(args)
         d = lambda: fn(ui, *args[1:])
--- a/mercurial/encoding.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/encoding.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,7 +11,6 @@
 import re
 import unicodedata
 
-from .pycompat import getattr
 from . import (
     error,
     policy,
@@ -80,6 +79,20 @@
 _nativeenviron = os.supports_bytes_environ
 if _nativeenviron:
     environ = os.environb  # re-exports
+    if pycompat.sysplatform == b'OpenVMS':
+        # workaround for a bug in VSI 3.10 port
+        # os.environb is only populated with a few Predefined symbols
+        def newget(self, key, default=None):
+            # pytype on linux does not understand OpenVMS special modules
+            import _decc  # pytype: disable=import-error
+
+            v = _decc.getenv(key, None)
+            if isinstance(key, bytes):
+                return default if v is None else v.encode('latin-1')
+            else:
+                return default if v is None else v
+
+        environ.__class__.get = newget
 else:
     # preferred encoding isn't known yet; use utf-8 to avoid unicode error
     # and recreate it once encoding is settled
--- a/mercurial/extensions.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/extensions.py	Tue Nov 07 15:21:11 2023 +0100
@@ -19,9 +19,7 @@
     gettext,
 )
 from .pycompat import (
-    getattr,
     open,
-    setattr,
 )
 
 from . import (
@@ -84,9 +82,8 @@
 
 
 def loadpath(path, module_name):
-    module_name = module_name.replace(b'.', b'_')
+    module_name = module_name.replace('.', '_')
     path = util.normpath(util.expandpath(path))
-    module_name = pycompat.fsdecode(module_name)
     path = pycompat.fsdecode(path)
     if os.path.isdir(path):
         # module/__init__.py style
@@ -106,30 +103,31 @@
 
 def _importh(name):
     """import and return the <name> module"""
-    mod = __import__(pycompat.sysstr(name))
-    components = name.split(b'.')
+    mod = __import__(name)
+    components = name.split('.')
     for comp in components[1:]:
         mod = getattr(mod, comp)
     return mod
 
 
 def _importext(name, path=None, reportfunc=None):
+    name = pycompat.fsdecode(name)
     if path:
         # the module will be loaded in sys.modules
         # choose a unique name so that it doesn't
         # conflict with other modules
-        mod = loadpath(path, b'hgext.%s' % name)
+        mod = loadpath(path, 'hgext.%s' % name)
     else:
         try:
-            mod = _importh(b"hgext.%s" % name)
+            mod = _importh("hgext.%s" % name)
         except ImportError as err:
             if reportfunc:
-                reportfunc(err, b"hgext.%s" % name, b"hgext3rd.%s" % name)
+                reportfunc(err, "hgext.%s" % name, "hgext3rd.%s" % name)
             try:
-                mod = _importh(b"hgext3rd.%s" % name)
+                mod = _importh("hgext3rd.%s" % name)
             except ImportError as err:
                 if reportfunc:
-                    reportfunc(err, b"hgext3rd.%s" % name, name)
+                    reportfunc(err, "hgext3rd.%s" % name, name)
                 mod = _importh(name)
     return mod
 
@@ -140,9 +138,9 @@
     ui.log(
         b'extension',
         b'    - could not import %s (%s): trying %s\n',
-        failed,
+        stringutil.forcebytestr(failed),
         stringutil.forcebytestr(err),
-        next,
+        stringutil.forcebytestr(next),
     )
     if ui.debugflag and ui.configbool(b'devel', b'debug.extensions'):
         ui.traceback()
@@ -155,42 +153,43 @@
     elif isinstance(xs, dict):
         for k, v in xs.items():
             _rejectunicode(name, k)
-            _rejectunicode(b'%s.%s' % (name, stringutil.forcebytestr(k)), v)
-    elif isinstance(xs, type(u'')):
+            k = pycompat.sysstr(k)
+            _rejectunicode('%s.%s' % (name, k), v)
+    elif isinstance(xs, str):
         raise error.ProgrammingError(
-            b"unicode %r found in %s" % (xs, name),
+            b"unicode %r found in %s" % (xs, stringutil.forcebytestr(name)),
             hint=b"use b'' to make it byte string",
         )
 
 
 # attributes set by registrar.command
-_cmdfuncattrs = (b'norepo', b'optionalrepo', b'inferrepo')
+_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')
 
 
 def _validatecmdtable(ui, cmdtable):
     """Check if extension commands have required attributes"""
     for c, e in cmdtable.items():
         f = e[0]
-        missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)]
+        missing = [a for a in _cmdfuncattrs if not hasattr(f, a)]
         if not missing:
             continue
-        raise error.ProgrammingError(
-            b'missing attributes: %s' % b', '.join(missing),
-            hint=b"use @command decorator to register '%s'" % c,
-        )
+        msg = b'missing attributes: %s'
+        msg %= b', '.join([stringutil.forcebytestr(m) for m in missing])
+        hint = b"use @command decorator to register '%s'" % c
+        raise error.ProgrammingError(msg, hint=hint)
 
 
 def _validatetables(ui, mod):
     """Sanity check for loadable tables provided by extension module"""
-    for t in [b'cmdtable', b'colortable', b'configtable']:
+    for t in ['cmdtable', 'colortable', 'configtable']:
         _rejectunicode(t, getattr(mod, t, {}))
     for t in [
-        b'filesetpredicate',
-        b'internalmerge',
-        b'revsetpredicate',
-        b'templatefilter',
-        b'templatefunc',
-        b'templatekeyword',
+        'filesetpredicate',
+        'internalmerge',
+        'revsetpredicate',
+        'templatefilter',
+        'templatefunc',
+        'templatekeyword',
     ]:
         o = getattr(mod, t, None)
         if o:
@@ -349,7 +348,7 @@
     #
     # This one is for the list of item that must be run before running any setup
     earlyextraloaders = [
-        (b'configtable', configitems, b'loadconfigtable'),
+        ('configtable', configitems, 'loadconfigtable'),
     ]
 
     ui.log(b'extension', b'- loading configtable attributes\n')
@@ -434,14 +433,14 @@
     #   which takes (ui, extensionname, extraobj) arguments
     ui.log(b'extension', b'- loading extension registration objects\n')
     extraloaders = [
-        (b'cmdtable', commands, b'loadcmdtable'),
-        (b'colortable', color, b'loadcolortable'),
-        (b'filesetpredicate', fileset, b'loadpredicate'),
-        (b'internalmerge', filemerge, b'loadinternalmerge'),
-        (b'revsetpredicate', revset, b'loadpredicate'),
-        (b'templatefilter', templatefilters, b'loadfilter'),
-        (b'templatefunc', templatefuncs, b'loadfunction'),
-        (b'templatekeyword', templatekw, b'loadkeyword'),
+        ('cmdtable', commands, 'loadcmdtable'),
+        ('colortable', color, 'loadcolortable'),
+        ('filesetpredicate', fileset, 'loadpredicate'),
+        ('internalmerge', filemerge, 'loadinternalmerge'),
+        ('revsetpredicate', revset, 'loadpredicate'),
+        ('templatefilter', templatefilters, 'loadfilter'),
+        ('templatefunc', templatefuncs, 'loadfunction'),
+        ('templatekeyword', templatekw, 'loadkeyword'),
     ]
     with util.timedcm('load registration objects') as stats:
         _loadextra(ui, newindex, extraloaders)
@@ -625,6 +624,10 @@
 
     def __init__(self, container, funcname, wrapper):
         assert callable(wrapper)
+        if not isinstance(funcname, str):
+            msg = b"pass wrappedfunction target name as `str`, not `bytes`"
+            util.nouideprecwarn(msg, b"6.6", stacklevel=2)
+            funcname = pycompat.sysstr(funcname)
         self._container = container
         self._funcname = funcname
         self._wrapper = wrapper
@@ -671,6 +674,11 @@
     """
     assert callable(wrapper)
 
+    if not isinstance(funcname, str):
+        msg = b"pass wrapfunction target name as `str`, not `bytes`"
+        util.nouideprecwarn(msg, b"6.6", stacklevel=2)
+        funcname = pycompat.sysstr(funcname)
+
     origfn = getattr(container, funcname)
     assert callable(origfn)
     if inspect.ismodule(container):
@@ -732,7 +740,7 @@
 
     # The hgext might not have a __file__ attribute (e.g. in PyOxidizer) and
     # it might not be on a filesystem even if it does.
-    if util.safehasattr(hgext, '__file__'):
+    if hasattr(hgext, '__file__'):
         extpath = os.path.dirname(
             util.abspath(pycompat.fsencode(hgext.__file__))
         )
@@ -847,7 +855,7 @@
 
             # The extensions are filesystem based, so either an error occurred
             # or all are enabled.
-            if util.safehasattr(hgext, '__file__'):
+            if hasattr(hgext, '__file__'):
                 return
 
             if name in _order:  # enabled
@@ -977,13 +985,13 @@
 
 def moduleversion(module):
     '''return version information from given module as a string'''
-    if util.safehasattr(module, b'getversion') and callable(module.getversion):
+    if hasattr(module, 'getversion') and callable(module.getversion):
         try:
             version = module.getversion()
         except Exception:
             version = b'unknown'
 
-    elif util.safehasattr(module, b'__version__'):
+    elif hasattr(module, '__version__'):
         version = module.__version__
     else:
         version = b''
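Note: with the deprecation introduced above, extensions should pass the
wrapfunction/wrappedfunction target name as `str`. A minimal sketch
(`hg.islocal` is a real function; the wrapper body is illustrative):

    from mercurial import extensions, hg

    def wrapped_islocal(orig, repo):
        # illustrative pass-through wrapper
        return orig(repo)

    # str name is the supported form; a bytes name still works in 6.6
    # but triggers the deprecation path added above
    extensions.wrapfunction(hg, 'islocal', wrapped_islocal)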
--- a/mercurial/exthelper.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/exthelper.py	Tue Nov 07 15:21:11 2023 +0100
@@ -325,7 +325,7 @@
             # Required, otherwise the function will not be wrapped
             uisetup = eh.finaluisetup
 
-            @eh.wrapfunction(discovery, b'checkheads')
+            @eh.wrapfunction(discovery, 'checkheads')
             def wrapcheckheads(orig, *args, **kwargs):
                 ui.note(b'His head smashed in and his heart cut out')
                 return orig(*args, **kwargs)
--- a/mercurial/filelog.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/filelog.py	Tue Nov 07 15:21:11 2023 +0100
@@ -107,11 +107,11 @@
     def iscensored(self, rev):
         return self._revlog.iscensored(rev)
 
-    def revision(self, node, _df=None):
-        return self._revlog.revision(node, _df=_df)
+    def revision(self, node):
+        return self._revlog.revision(node)
 
-    def rawdata(self, node, _df=None):
-        return self._revlog.rawdata(node, _df=_df)
+    def rawdata(self, node):
+        return self._revlog.rawdata(node)
 
     def emitrevisions(
         self,
--- a/mercurial/filemerge.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/filemerge.py	Tue Nov 07 15:21:11 2023 +0100
@@ -16,9 +16,6 @@
     hex,
     short,
 )
-from .pycompat import (
-    getattr,
-)
 
 from . import (
     encoding,
@@ -834,12 +831,13 @@
                 # avoid cycle cmdutil->merge->filemerge->extensions->cmdutil
                 from . import extensions
 
-                mod = extensions.loadpath(toolpath, b'hgmerge.%s' % tool)
+                mod_name = 'hgmerge.%s' % pycompat.sysstr(tool)
+                mod = extensions.loadpath(toolpath, mod_name)
             except Exception:
                 raise error.Abort(
                     _(b"loading python merge script failed: %s") % toolpath
                 )
-            mergefn = getattr(mod, scriptfn, None)
+            mergefn = getattr(mod, pycompat.sysstr(scriptfn), None)
             if mergefn is None:
                 raise error.Abort(
                     _(b"%s does not have function: %s") % (toolpath, scriptfn)
--- a/mercurial/fileset.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/fileset.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
 import re
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     error,
     filesetlang,
--- a/mercurial/filesetlang.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/filesetlang.py	Tue Nov 07 15:21:11 2023 +0100
@@ -7,7 +7,6 @@
 
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     error,
     parser,
--- a/mercurial/help.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/help.py	Tue Nov 07 15:21:11 2023 +0100
@@ -26,7 +26,6 @@
     _,
     gettext,
 )
-from .pycompat import getattr
 from . import (
     cmdutil,
     encoding,
@@ -43,7 +42,6 @@
     templatefuncs,
     templatekw,
     ui as uimod,
-    util,
 )
 from .hgweb import webcommands
 from .utils import (
@@ -810,7 +808,7 @@
         doc = gettext(pycompat.getdoc(entry[0]))
         if not doc:
             doc = _(b"(no help text available)")
-        if util.safehasattr(entry[0], 'definition'):  # aliased command
+        if hasattr(entry[0], 'definition'):  # aliased command
             source = entry[0].source
             if entry[0].definition.startswith(b'!'):  # shell alias
                 doc = _(b'shell alias for: %s\n\n%s\n\ndefined by: %s\n') % (
--- a/mercurial/hg.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hg.py	Tue Nov 07 15:21:11 2023 +0100
@@ -19,7 +19,6 @@
     sha1nodeconstants,
     short,
 )
-from .pycompat import getattr
 
 from . import (
     bookmarks,
@@ -66,7 +65,7 @@
 
 
 def addbranchrevs(lrepo, other, branches, revs, remotehidden=False):
-    if util.safehasattr(other, 'peer'):
+    if hasattr(other, 'peer'):
         # a courtesy to callers using a localrepo for other
         peer = other.peer(remotehidden=remotehidden)
     else:
@@ -174,7 +173,7 @@
             cls.instance  # make sure we load the module
         else:
             cls = LocalFactory
-        if util.safehasattr(cls, 'islocal'):
+        if hasattr(cls, 'islocal'):
             return cls.islocal(repo)  # pytype: disable=module-attr
         return False
     repo.ui.deprecwarn(b"use obj.local() instead of islocal(obj)", b"6.4")
@@ -254,7 +253,7 @@
     '''return a repository peer for the specified path'''
     ui = getattr(uiorrepo, 'ui', uiorrepo)
     rui = remoteui(uiorrepo, opts)
-    if util.safehasattr(path, 'url'):
+    if hasattr(path, 'url'):
         # this is already a urlutil.path object
         peer_path = path
     else:
@@ -317,7 +316,7 @@
     if repo.sharedpath == repo.path:
         return None
 
-    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
+    if hasattr(repo, 'srcrepo') and repo.srcrepo:
         return repo.srcrepo
 
     # the sharedpath always ends in the .hg; we want the path to the repo
@@ -340,7 +339,7 @@
     '''create a shared repository'''
 
     not_local_msg = _(b'can only share local repositories')
-    if util.safehasattr(source, 'local'):
+    if hasattr(source, 'local'):
         if source.local() is None:
             raise error.Abort(not_local_msg)
     elif not islocal(source):
@@ -729,7 +728,7 @@
             branches = (src_path.branch, branch or [])
             source = src_path.loc
     else:
-        if util.safehasattr(source, 'peer'):
+        if hasattr(source, 'peer'):
             srcpeer = source.peer()  # in case we were called with a localrepo
         else:
             srcpeer = source
@@ -1567,7 +1566,7 @@
 
 def remoteui(src, opts):
     """build a remote ui from ui or repo and opts"""
-    if util.safehasattr(src, 'baseui'):  # looks like a repository
+    if hasattr(src, 'baseui'):  # looks like a repository
         dst = src.baseui.copy()  # drop repo-specific config
         src = src.ui  # copy target options from repo
     else:  # assume it's a global ui object
@@ -1599,10 +1598,10 @@
 # Used to check if the repository has changed looking at mtime and size of
 # these files.
 foi = [
-    (b'spath', b'00changelog.i'),
-    (b'spath', b'phaseroots'),  # ! phase can change content at the same size
-    (b'spath', b'obsstore'),
-    (b'path', b'bookmarks'),  # ! bookmark can change content at the same size
+    ('spath', b'00changelog.i'),
+    ('spath', b'phaseroots'),  # ! phase can change content at the same size
+    ('spath', b'obsstore'),
+    ('path', b'bookmarks'),  # ! bookmark can change content at the same size
 ]
 
 
--- a/mercurial/hgweb/common.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hgweb/common.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,6 @@
 
 from ..i18n import _
 from ..pycompat import (
-    getattr,
     open,
 )
 from .. import (
--- a/mercurial/hgweb/hgweb_mod.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hgweb/hgweb_mod.py	Tue Nov 07 15:21:11 2023 +0100
@@ -17,7 +17,6 @@
     permhooks,
     statusmessage,
 )
-from ..pycompat import getattr
 
 from .. import (
     encoding,
@@ -34,7 +33,6 @@
     templater,
     templateutil,
     ui as uimod,
-    util,
     wireprotoserver,
 )
 
@@ -403,7 +401,7 @@
                 cmd = cmd[style + 1 :]
 
             # avoid accepting e.g. style parameter as command
-            if util.safehasattr(webcommands, cmd):
+            if hasattr(webcommands, pycompat.sysstr(cmd)):
                 req.qsparams[b'cmd'] = cmd
 
             if cmd == b'static':
@@ -474,11 +472,11 @@
                 # override easily enough.
                 res.status = b'200 Script output follows'
                 res.headers[b'Content-Type'] = ctype
-                return getattr(webcommands, cmd)(rctx)
+                return getattr(webcommands, pycompat.sysstr(cmd))(rctx)
 
         except (error.LookupError, error.RepoLookupError) as err:
             msg = pycompat.bytestr(err)
-            if util.safehasattr(err, 'name') and not isinstance(
+            if hasattr(err, 'name') and not isinstance(
                 err, error.ManifestLookupError
             ):
                 msg = b'revision not found: %s' % err.name
--- a/mercurial/hgweb/server.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hgweb/server.py	Tue Nov 07 15:21:11 2023 +0100
@@ -16,7 +16,6 @@
 
 from ..i18n import _
 from ..pycompat import (
-    getattr,
     open,
 )
 
@@ -100,7 +99,7 @@
 
     def log_request(self, code='-', size='-'):
         xheaders = []
-        if util.safehasattr(self, 'headers'):
+        if hasattr(self, 'headers'):
             xheaders = [
                 h for h in self.headers.items() if h[0].startswith('x-')
             ]
@@ -214,7 +213,7 @@
         env['wsgi.multithread'] = isinstance(
             self.server, socketserver.ThreadingMixIn
         )
-        if util.safehasattr(socketserver, 'ForkingMixIn'):
+        if hasattr(socketserver, 'ForkingMixIn'):
             env['wsgi.multiprocess'] = isinstance(
                 self.server, socketserver.ForkingMixIn
             )
@@ -344,7 +343,7 @@
     threading.active_count()  # silence pyflakes and bypass demandimport
     _mixin = socketserver.ThreadingMixIn
 except ImportError:
-    if util.safehasattr(os, "fork"):
+    if hasattr(os, "fork"):
         _mixin = socketserver.ForkingMixIn
     else:
 
--- a/mercurial/hgweb/webcommands.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hgweb/webcommands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -13,7 +13,6 @@
 
 from ..i18n import _
 from ..node import hex, short
-from ..pycompat import getattr
 
 from .common import (
     ErrorResponse,
@@ -1050,7 +1049,9 @@
             }
 
     diffopts = webutil.difffeatureopts(web.req, web.repo.ui, b'annotate')
-    diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults}
+    diffopts = {
+        k: getattr(diffopts, pycompat.sysstr(k)) for k in diffopts.defaults
+    }
 
     return web.sendtemplate(
         b'fileannotate',
--- a/mercurial/hgweb/webutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hgweb/webutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -14,7 +14,6 @@
 
 from ..i18n import _
 from ..node import hex, short
-from ..pycompat import setattr
 
 from .common import (
     ErrorResponse,
@@ -211,7 +210,7 @@
             b'description': s.description(),
             b'branch': s.branch(),
         }
-        if util.safehasattr(s, 'path'):
+        if hasattr(s, 'path'):
             d[b'file'] = s.path()
         yield d
 
@@ -230,16 +229,16 @@
         ui, untrusted=True, section=section, whitespace=True
     )
 
-    for k in (
-        b'ignorews',
-        b'ignorewsamount',
-        b'ignorewseol',
-        b'ignoreblanklines',
+    for kb, ks in (
+        (b'ignorews', 'ignorews'),
+        (b'ignorewsamount', 'ignorewsamount'),
+        (b'ignorewseol', 'ignorewseol'),
+        (b'ignoreblanklines', 'ignoreblanklines'),
     ):
-        v = req.qsparams.get(k)
+        v = req.qsparams.get(kb)
         if v is not None:
             v = stringutil.parsebool(v)
-            setattr(diffopts, k, v if v is not None else True)
+            setattr(diffopts, ks, v if v is not None else True)
 
     return diffopts
 
--- a/mercurial/hgweb/wsgicgi.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hgweb/wsgicgi.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
 # http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
 
 
-from ..pycompat import getattr
 from .. import encoding, pycompat
 
 from ..utils import procutil
--- a/mercurial/hook.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/hook.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,7 +12,6 @@
 import sys
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     demandimport,
     encoding,
@@ -40,13 +39,14 @@
 
     if callable(funcname):
         obj = funcname
-        funcname = pycompat.sysbytes(obj.__module__ + "." + obj.__name__)
+        funcname = obj.__module__ + "." + obj.__name__
     else:
-        d = funcname.rfind(b'.')
+        funcname = pycompat.sysstr(funcname)
+        d = funcname.rfind('.')
         if d == -1:
             raise error.HookLoadError(
                 _(b'%s hook is invalid: "%s" not in a module')
-                % (hname, funcname)
+                % (hname, stringutil.forcebytestr(funcname))
             )
         modname = funcname[:d]
         oldpaths = sys.path
@@ -89,27 +89,30 @@
                         )
                     else:
                         tracebackhint = None
-                    raise error.HookLoadError(
-                        _(b'%s hook is invalid: import of "%s" failed')
-                        % (hname, modname),
-                        hint=tracebackhint,
+                    msg = _(b'%s hook is invalid: import of "%s" failed')
+                    msg %= (
+                        stringutil.forcebytestr(hname),
+                        stringutil.forcebytestr(modname),
                     )
+                    raise error.HookLoadError(msg, hint=tracebackhint)
         sys.path = oldpaths
         try:
-            for p in funcname.split(b'.')[1:]:
+            for p in funcname.split('.')[1:]:
                 obj = getattr(obj, p)
         except AttributeError:
             raise error.HookLoadError(
                 _(b'%s hook is invalid: "%s" is not defined')
-                % (hname, funcname)
+                % (hname, stringutil.forcebytestr(funcname))
             )
         if not callable(obj):
             raise error.HookLoadError(
                 _(b'%s hook is invalid: "%s" is not callable')
-                % (hname, funcname)
+                % (hname, stringutil.forcebytestr(funcname))
             )
 
-    ui.note(_(b"calling hook %s: %s\n") % (hname, funcname))
+    ui.note(
+        _(b"calling hook %s: %s\n") % (hname, stringutil.forcebytestr(funcname))
+    )
     starttime = util.timer()
 
     try:
@@ -134,7 +137,7 @@
             b'pythonhook',
             b'pythonhook-%s: %s finished in %0.2f seconds\n',
             htype,
-            funcname,
+            stringutil.forcebytestr(funcname),
             duration,
         )
     if r:
@@ -347,11 +350,12 @@
                     if repo:
                         path = os.path.join(repo.root, path)
                     try:
-                        mod = extensions.loadpath(path, b'hghook.%s' % hname)
+                        mod_name = 'hghook.%s' % pycompat.sysstr(hname)
+                        mod = extensions.loadpath(path, mod_name)
                     except Exception:
                         ui.write(_(b"loading %s hook failed:\n") % hname)
                         raise
-                    hookfn = getattr(mod, cmd)
+                    hookfn = getattr(mod, pycompat.sysstr(cmd))
                 else:
                     hookfn = cmd[7:].strip()
                 r, raised = pythonhook(
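Note: a Python hook configured as `pretxncommit.check =
python:mymodule.myhook` (hypothetical module and function names) is
resolved by importing the first dotted component and walking the rest
with getattr, now entirely on `str` names. A runnable sketch of that
attribute walk, demonstrated with a stdlib name:

    funcname = 'os.path.join'
    obj = __import__(funcname.split('.')[0])
    for p in funcname.split('.')[1:]:
        obj = getattr(obj, p)
    assert callable(obj)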
--- a/mercurial/httppeer.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/httppeer.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,6 @@
 
 from concurrent import futures
 from .i18n import _
-from .pycompat import getattr
 from . import (
     bundle2,
     error,
@@ -65,7 +64,7 @@
 class _multifile:
     def __init__(self, *fileobjs):
         for f in fileobjs:
-            if not util.safehasattr(f, 'length'):
+            if not hasattr(f, 'length'):
                 raise ValueError(
                     b'_multifile only supports file objects that '
                     b'have a length but this one does not:',
@@ -180,7 +179,7 @@
     qs = b'?%s' % urlreq.urlencode(q)
     cu = b"%s%s" % (repobaseurl, qs)
     size = 0
-    if util.safehasattr(data, 'length'):
+    if hasattr(data, 'length'):
         size = data.length
     elif data is not None:
         size = len(data)
--- a/mercurial/i18n.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/i18n.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,7 +11,6 @@
 import os
 import sys
 
-from .pycompat import getattr
 from .utils import resourceutil
 from . import (
     encoding,
--- a/mercurial/interfaces/repository.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/interfaces/repository.py	Tue Nov 07 15:21:11 2023 +0100
@@ -684,7 +684,7 @@
         Any metadata is excluded from size measurements.
         """
 
-    def revision(node, raw=False):
+    def revision(node):
         """Obtain fulltext data for a node.
 
         By default, any storage transformations are applied before the data
@@ -1229,13 +1229,6 @@
         """
     )
 
-    _generaldelta = interfaceutil.Attribute(
-        """Whether generaldelta storage is being used.
-
-        TODO this is revlog specific and should not be exposed.
-        """
-    )
-
     fulltextcache = interfaceutil.Attribute(
         """Dict with cache of fulltexts.
 
@@ -1282,10 +1275,10 @@
     def linkrev(rev):
         """Obtain the changeset revision number a revision is linked to."""
 
-    def revision(node, _df=None):
+    def revision(node):
         """Obtain fulltext data for a node."""
 
-    def rawdata(node, _df=None):
+    def rawdata(node):
         """Obtain raw data for a node."""
 
     def revdiff(rev1, rev2):
--- a/mercurial/keepalive.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/keepalive.py	Tue Nov 07 15:21:11 2023 +0100
@@ -90,7 +90,6 @@
 import threading
 
 from .i18n import _
-from .pycompat import getattr
 from .node import hex
 from . import (
     pycompat,
--- a/mercurial/localrepo.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/localrepo.py	Tue Nov 07 15:21:11 2023 +0100
@@ -28,10 +28,6 @@
     sha1nodeconstants,
     short,
 )
-from .pycompat import (
-    delattr,
-    getattr,
-)
 from . import (
     bookmarks,
     branchmap,
@@ -58,6 +54,7 @@
     obsolete,
     pathutil,
     phases,
+    policy,
     pushkey,
     pycompat,
     rcutil,
@@ -419,7 +416,7 @@
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
-                if util.safehasattr(ret, 'getchunks'):
+                if hasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
@@ -1071,6 +1068,10 @@
     options = {}
     options[b'flagprocessors'] = {}
 
+    feature_config = options[b'feature-config'] = revlog.FeatureConfig()
+    data_config = options[b'data-config'] = revlog.DataConfig()
+    delta_config = options[b'delta-config'] = revlog.DeltaConfig()
+
     if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
         options[b'revlogv1'] = True
     if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
@@ -1086,18 +1087,26 @@
     # experimental config: format.chunkcachesize
     chunkcachesize = ui.configint(b'format', b'chunkcachesize')
     if chunkcachesize is not None:
-        options[b'chunkcachesize'] = chunkcachesize
-
-    deltabothparents = ui.configbool(
+        data_config.chunk_cache_size = chunkcachesize
+
+    if ui.configbool(b'experimental', b'revlog.uncompressed-cache.enabled'):
+        factor = ui.configint(
+            b'experimental', b'revlog.uncompressed-cache.factor'
+        )
+        count = ui.configint(
+            b'experimental', b'revlog.uncompressed-cache.count'
+        )
+        data_config.uncompressed_cache_factor = factor
+        data_config.uncompressed_cache_count = count
+
+    delta_config.delta_both_parents = ui.configbool(
         b'storage', b'revlog.optimize-delta-parent-choice'
     )
-    options[b'deltabothparents'] = deltabothparents
-    dps_cgds = ui.configint(
+    delta_config.candidate_group_chunk_size = ui.configint(
         b'storage',
         b'revlog.delta-parent-search.candidate-group-chunk-size',
     )
-    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
-    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
+    delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
 
     issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
     options[b'issue6528.fix-incoming'] = issue6528
@@ -1108,32 +1117,33 @@
         lazydeltabase = ui.configbool(
             b'storage', b'revlog.reuse-external-delta-parent'
         )
-    if lazydeltabase is None:
-        lazydeltabase = not scmutil.gddeltaconfig(ui)
-    options[b'lazydelta'] = lazydelta
-    options[b'lazydeltabase'] = lazydeltabase
+        if lazydeltabase is None:
+            lazydeltabase = not scmutil.gddeltaconfig(ui)
+    delta_config.lazy_delta = lazydelta
+    delta_config.lazy_delta_base = lazydeltabase
 
     chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
     if 0 <= chainspan:
-        options[b'maxdeltachainspan'] = chainspan
+        delta_config.max_deltachain_span = chainspan
 
     mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
     if mmapindexthreshold is not None:
-        options[b'mmapindexthreshold'] = mmapindexthreshold
+        data_config.mmap_index_threshold = mmapindexthreshold
 
     withsparseread = ui.configbool(b'experimental', b'sparse-read')
     srdensitythres = float(
         ui.config(b'experimental', b'sparse-read.density-threshold')
     )
     srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
-    options[b'with-sparse-read'] = withsparseread
-    options[b'sparse-read-density-threshold'] = srdensitythres
-    options[b'sparse-read-min-gap-size'] = srmingapsize
+    data_config.with_sparse_read = withsparseread
+    data_config.sr_density_threshold = srdensitythres
+    data_config.sr_min_gap_size = srmingapsize
 
     sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
-    options[b'sparse-revlog'] = sparserevlog
+    delta_config.sparse_revlog = sparserevlog
     if sparserevlog:
         options[b'generaldelta'] = True
+        data_config.with_sparse_read = True
 
     maxchainlen = None
     if sparserevlog:
@@ -1141,7 +1151,7 @@
     # experimental config: format.maxchainlen
     maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
     if maxchainlen is not None:
-        options[b'maxchainlen'] = maxchainlen
+        delta_config.max_chain_len = maxchainlen
 
     for r in requirements:
         # we allow multiple compression engine requirement to co-exist because
@@ -1150,21 +1160,23 @@
         # The compression used for new entries will be "the last one"
         prefix = r.startswith
         if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
-            options[b'compengine'] = r.split(b'-', 2)[2]
-
-    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
-    if options[b'zlib.level'] is not None:
-        if not (0 <= options[b'zlib.level'] <= 9):
+            feature_config.compression_engine = r.split(b'-', 2)[2]
+
+    zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
+    if zlib_level is not None:
+        if not (0 <= zlib_level <= 9):
             msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
-            raise error.Abort(msg % options[b'zlib.level'])
-    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
-    if options[b'zstd.level'] is not None:
-        if not (0 <= options[b'zstd.level'] <= 22):
+            raise error.Abort(msg % zlib_level)
+    feature_config.compression_engine_options[b'zlib.level'] = zlib_level
+    zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
+    if zstd_level is not None:
+        if not (0 <= zstd_level <= 22):
             msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
-            raise error.Abort(msg % options[b'zstd.level'])
+            raise error.Abort(msg % zstd_level)
+    feature_config.compression_engine_options[b'zstd.level'] = zstd_level
 
     if requirementsmod.NARROW_REQUIREMENT in requirements:
-        options[b'enableellipsis'] = True
+        feature_config.enable_ellipsis = True
 
     if ui.configbool(b'experimental', b'rust.index'):
         options[b'rust.index'] = True
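Note: the hunks above move flat bytes-keyed entries of `options` into
attrs-based config objects. A trimmed, hedged sketch of the pattern
(mirroring the DataConfig usage; only one real attribute is shown and
its default here is assumed):

    import attr

    @attr.s()
    class DataConfig:
        # previously options[b'chunkcachesize']
        chunk_cache_size = attr.ib(default=65536)

    data_config = DataConfig()
    data_config.chunk_cache_size = 131072  # e.g. from format.chunkcachesize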
@@ -1460,7 +1472,7 @@
         if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
             b'devel', b'check-locks'
         ):
-            if util.safehasattr(self.svfs, 'vfs'):  # this is filtervfs
+            if hasattr(self.svfs, 'vfs'):  # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
             else:  # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
@@ -1522,8 +1534,8 @@
             repo = rref()
             if (
                 repo is None
-                or not util.safehasattr(repo, '_wlockref')
-                or not util.safehasattr(repo, '_lockref')
+                or not hasattr(repo, '_wlockref')
+                or not hasattr(repo, '_lockref')
             ):
                 return
             if mode in (None, b'r', b'rb'):
@@ -1571,7 +1583,7 @@
         def checksvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
-            if repo is None or not util.safehasattr(repo, '_lockref'):
+            if repo is None or not hasattr(repo, '_lockref'):
                 return
             if mode in (None, b'r', b'rb'):
                 return
@@ -3017,7 +3029,7 @@
             if (
                 k == b'changelog'
                 and self.currenttransaction()
-                and self.changelog._delayed
+                and self.changelog.is_delaying
             ):
                 # The changelog object may store unwritten revisions. We don't
                 # want to lose them.
@@ -3027,7 +3039,11 @@
             if clearfilecache:
                 del self._filecache[k]
             try:
-                delattr(unfiltered, k)
+                # XXX ideally, the key would be a unicode string to match the
+                # fact that it refers to an attribute name. However, changing
+                # this was a bit of scope creep compared to the series cleaning
+                # up del/set/getattr, so we kept things simple here.
+                delattr(unfiltered, pycompat.sysstr(k))
             except AttributeError:
                 pass
         self.invalidatecaches()
@@ -3763,7 +3779,11 @@
     if ui.configbool(b'format', b'bookmarks-in-store'):
         requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
 
-    if ui.configbool(b'format', b'use-persistent-nodemap'):
+    # The feature is disabled unless a fast implementation is available.
+    persistent_nodemap_default = policy.importrust('revlog') is not None
+    if ui.configbool(
+        b'format', b'use-persistent-nodemap', persistent_nodemap_default
+    ):
         requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
 
     # if share-safe is enabled, let's create the new repository with the new
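Note: the persistent nodemap now defaults to enabled whenever a fast
implementation is available; the gate is simply whether the compiled
Rust revlog module can be imported (sketch using the real helper):

    from mercurial import policy

    # True when the Rust extensions are present, which is what the new
    # default above keys on; an explicit format.use-persistent-nodemap
    # setting still overrides it either way
    persistent_nodemap_default = policy.importrust('revlog') is not None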
--- a/mercurial/lock.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/lock.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,6 @@
 import warnings
 
 from .i18n import _
-from .pycompat import getattr
 
 from . import (
     encoding,
--- a/mercurial/lsprof.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/lsprof.py	Tue Nov 07 15:21:11 2023 +0100
@@ -1,8 +1,6 @@
 import _lsprof
 import sys
 
-from .pycompat import getattr
-
 Profiler = _lsprof.Profiler
 
 # PyPy doesn't expose profiler_entry from the module.
--- a/mercurial/mail.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/mail.py	Tue Nov 07 15:21:11 2023 +0100
@@ -20,7 +20,6 @@
 
 from .i18n import _
 from .pycompat import (
-    getattr,
     open,
 )
 from . import (
--- a/mercurial/manifest.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/manifest.py	Tue Nov 07 15:21:11 2023 +0100
@@ -17,7 +17,6 @@
     hex,
     nullrev,
 )
-from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -1615,7 +1614,6 @@
         )
 
         self.index = self._revlog.index
-        self._generaldelta = self._revlog._generaldelta
 
     def get_revlog(self):
         """return an actual revlog instance if any
@@ -1628,7 +1626,7 @@
 
     def _setupmanifestcachehooks(self, repo):
         """Persist the manifestfulltextcache on lock release"""
-        if not util.safehasattr(repo, '_wlockref'):
+        if not hasattr(repo, '_wlockref'):
             return
 
         self._fulltextcache._opener = repo.wcachevfs
@@ -1822,11 +1820,11 @@
     def checksize(self):
         return self._revlog.checksize()
 
-    def revision(self, node, _df=None):
-        return self._revlog.revision(node, _df=_df)
+    def revision(self, node):
+        return self._revlog.revision(node)
 
-    def rawdata(self, node, _df=None):
-        return self._revlog.rawdata(node, _df=_df)
+    def rawdata(self, node):
+        return self._revlog.rawdata(node)
 
     def revdiff(self, rev1, rev2):
         return self._revlog.revdiff(rev1, rev2)
--- a/mercurial/mdiff.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/mdiff.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,10 +11,6 @@
 import zlib
 
 from .i18n import _
-from .pycompat import (
-    getattr,
-    setattr,
-)
 from . import (
     diffhelper,
     encoding,
@@ -78,7 +74,7 @@
             v = opts.get(k)
             if v is None:
                 v = self.defaults[k]
-            setattr(self, k, v)
+            setattr(self, pycompat.sysstr(k), v)
 
         try:
             self.context = int(self.context)
@@ -89,14 +85,15 @@
             )
 
     def copy(self, **kwargs):
-        opts = {k: getattr(self, k) for k in self.defaults}
+        opts = {k: getattr(self, pycompat.sysstr(k)) for k in self.defaults}
         opts = pycompat.strkwargs(opts)
         opts.update(kwargs)
         return diffopts(**opts)
 
     def __bytes__(self):
         return b", ".join(
-            b"%s: %r" % (k, getattr(self, k)) for k in self.defaults
+            b"%s: %r" % (k, getattr(self, pycompat.sysstr(k)))
+            for k in self.defaults
         )
 
     __str__ = encoding.strmethod(__bytes__)
@@ -210,11 +207,7 @@
 
 
 def chooseblocksfunc(opts=None):
-    if (
-        opts is None
-        or not opts.xdiff
-        or not util.safehasattr(bdiff, 'xdiffblocks')
-    ):
+    if opts is None or not opts.xdiff or not hasattr(bdiff, 'xdiffblocks'):
         return bdiff.blocks
     else:
         return bdiff.xdiffblocks
--- a/mercurial/narrowspec.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/narrowspec.py	Tue Nov 07 15:21:11 2023 +0100
@@ -8,7 +8,6 @@
 import weakref
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     error,
     match as matchmod,
--- a/mercurial/obsolete.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/obsolete.py	Tue Nov 07 15:21:11 2023 +0100
@@ -73,7 +73,6 @@
 import weakref
 
 from .i18n import _
-from .pycompat import getattr
 from .node import (
     bin,
     hex,
@@ -940,7 +939,7 @@
 
 def _mutablerevs(repo):
     """the set of mutable revision in the repository"""
-    return repo._phasecache.getrevset(repo, phases.mutablephases)
+    return repo._phasecache.getrevset(repo, phases.relevant_mutable_phases)
 
 
 @cachefor(b'obsolete')
@@ -994,7 +993,8 @@
     torev = cl.index.get_rev
     tonode = cl.node
     obsstore = repo.obsstore
-    for rev in repo.revs(b'(not public()) and (not obsolete())'):
+    candidates = sorted(_mutablerevs(repo) - getrevs(repo, b"obsolete"))
+    for rev in candidates:
         # We only evaluate mutable, non-obsolete revisions
         node = tonode(rev)
         # (future) A cache of predecessors may worth if split is very common
@@ -1016,7 +1016,8 @@
     obsstore = repo.obsstore
     newermap = {}
     tonode = repo.changelog.node
-    for rev in repo.revs(b'(not public()) - obsolete()'):
+    candidates = sorted(_mutablerevs(repo) - getrevs(repo, b"obsolete"))
+    for rev in candidates:
         node = tonode(rev)
         mark = obsstore.predecessors.get(node, ())
         toprocess = set(mark)
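Note: both loops above now build their candidate set from the phase and
obsstore caches instead of evaluating a revset; `relevant_mutable_phases`
covers draft and secret, the only phases whose revisions can be obsolete
or unstable. A hedged sketch of the equivalent computation:

    from mercurial import obsolete, phases

    def mutable_non_obsolete(repo):
        # same candidates as repo.revs(b'(not public()) - obsolete()')
        # for the purpose of these loops, without a revset round-trip
        mutable = repo._phasecache.getrevset(
            repo, phases.relevant_mutable_phases
        )
        return sorted(mutable - obsolete.getrevs(repo, b"obsolete"))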
--- a/mercurial/patch.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/patch.py	Tue Nov 07 15:21:11 2023 +0100
@@ -168,7 +168,7 @@
 
     mimeheaders = [b'content-type']
 
-    if not util.safehasattr(stream, 'next'):
+    if not hasattr(stream, 'next'):
         # http responses, for example, have readline but not next
         stream = fiter(stream)
 
@@ -1703,7 +1703,7 @@
 
     newhunks = []
     for c in hunks:
-        if util.safehasattr(c, 'reversehunk'):
+        if hasattr(c, 'reversehunk'):
             c = c.reversehunk()
         newhunks.append(c)
     return newhunks
--- a/mercurial/pathutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/pathutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -377,7 +377,7 @@
         return d in self._dirs
 
 
-if util.safehasattr(parsers, 'dirs'):
+if hasattr(parsers, 'dirs'):
     dirs = parsers.dirs
 
 if rustdirs is not None:
--- a/mercurial/phases.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/phases.py	Tue Nov 07 15:21:11 2023 +0100
@@ -111,10 +111,6 @@
     short,
     wdirrev,
 )
-from .pycompat import (
-    getattr,
-    setattr,
-)
 from . import (
     error,
     pycompat,
@@ -169,6 +165,7 @@
 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
 # record phase property
 mutablephases = (draft, secret, archived, internal)
+relevant_mutable_phases = (draft, secret)  # could be obsolete or unstable
 remotehiddenphases = (secret, archived, internal)
 localhiddenphases = (internal, archived)
 
--- a/mercurial/policy.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/policy.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,8 +9,6 @@
 import os
 import sys
 
-from .pycompat import getattr
-
 # Rules for how modules can be loaded. Values are:
 #
 #    c - require C extensions
--- a/mercurial/posix.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/posix.py	Tue Nov 07 15:21:11 2023 +0100
@@ -36,7 +36,6 @@
 
 from .i18n import _
 from .pycompat import (
-    getattr,
     open,
 )
 from . import (
@@ -554,6 +553,12 @@
         return False
 
 
+if pycompat.sysplatform == b'OpenVMS':
+    # OpenVMS's symlink emulation is broken on some OpenVMS versions.
+    def checklink(path):
+        return False
+
+
 _needsshellquote: Optional[Match[bytes]] = None
 
 
--- a/mercurial/profiling.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/profiling.py	Tue Nov 07 15:21:11 2023 +0100
@@ -10,7 +10,6 @@
 
 from .i18n import _
 from .pycompat import (
-    getattr,
     open,
 )
 from . import (
--- a/mercurial/pure/osutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/pure/osutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,7 +11,6 @@
 import os
 import stat as statmod
 
-from ..pycompat import getattr
 from .. import (
     encoding,
     pycompat,
--- a/mercurial/pvec.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/pvec.py	Tue Nov 07 15:21:11 2023 +0100
@@ -159,7 +159,7 @@
 def ctxpvec(ctx):
     '''construct a pvec for ctx while filling in the cache'''
     r = ctx.repo()
-    if not util.safehasattr(r, "_pveccache"):
+    if not hasattr(r, "_pveccache"):
         r._pveccache = {}
     pvc = r._pveccache
     if ctx.rev() not in pvc:
--- a/mercurial/pycompat.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/pycompat.py	Tue Nov 07 15:21:11 2023 +0100
@@ -355,6 +355,13 @@
 def _wrapattrfunc(f):
     @functools.wraps(f)
     def w(object, name, *args):
+        if isinstance(name, bytes):
+            from . import util
+
+            msg = b'function "%s" takes `str` as argument, not `bytes`'
+            fname = f.__name__.encode('ascii')
+            msg %= fname
+            util.nouideprecwarn(msg, b"6.6", stacklevel=2)
         return f(object, sysstr(name), *args)
 
     return w
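Note: with the wrapper change above, calling the pycompat attribute
helpers with a bytes name keeps working but is now flagged as deprecated
(via util.nouideprecwarn) before the name is converted. A minimal sketch
of what triggers it, using a plain illustrative class:

    from mercurial import pycompat

    class Obj:
        pass

    o = Obj()
    # bytes name: accepted in 6.6, but deprecated and converted to str
    pycompat.setattr(o, b'flag', True)
    assert o.flag is True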
--- a/mercurial/registrar.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/registrar.py	Tue Nov 07 15:21:11 2023 +0100
@@ -6,11 +6,11 @@
 # GNU General Public License version 2 or any later version.
 
 
+from typing import Any, List, Optional, Tuple
 from . import (
     configitems,
     error,
     pycompat,
-    util,
 )
 
 # unlike the other registered items, config options are neither functions or
@@ -64,7 +64,7 @@
             msg = b'duplicate registration for name: "%s"' % name
             raise error.ProgrammingError(msg)
 
-        if func.__doc__ and not util.safehasattr(func, '_origdoc'):
+        if func.__doc__ and not hasattr(func, '_origdoc'):
             func._origdoc = func.__doc__.strip()
             doc = pycompat.sysbytes(func._origdoc)
             func.__doc__ = pycompat.sysstr(self._formatdoc(decl, doc))
@@ -534,3 +534,30 @@
 
         # actual capabilities, which this internal merge tool has
         func.capabilities = {b"binary": binarycap, b"symlink": symlinkcap}
+
+
+class verify_check(_funcregistrarbase):
+    """Decorator to register a check for admin::verify
+
+    `options` is a list of (name, default value, help) tuples passed to the
+    check
+    """
+
+    def __init__(self, table=None, alias_table=None):
+        super().__init__(table)
+        if alias_table is None:
+            self._alias_table = {}
+        else:
+            self._alias_table = alias_table
+
+    def _extrasetup(
+        self,
+        name,
+        func,
+        alias: Optional[bytes] = None,
+        options: Optional[List[Tuple[bytes, Any, bytes]]] = None,
+    ):
+        func.alias = alias
+        func.options = options
+
+        if alias:
+            self._alias_table[alias] = name
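Note: a hedged sketch of registering a check with the new class (the
check name, alias, and option are hypothetical; the wiring follows the
__init__ and _extrasetup signatures above):

    from mercurial import registrar

    table = {}
    aliases = {}
    verify_check = registrar.verify_check(table, alias_table=aliases)

    @verify_check(
        b'working-copy.dirstate',
        alias=b'wcd',
        options=[(b'strict', False, b'abort on the first issue')],
    )
    def check_dirstate(ui, repo, **opts):
        # hypothetical check body
        return 0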
--- a/mercurial/repocache.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/repocache.py	Tue Nov 07 15:21:11 2023 +0100
@@ -129,7 +129,7 @@
     srcfilecache = srcrepo._filecache
     if b'changelog' in srcfilecache:
         destfilecache[b'changelog'] = ce = srcfilecache[b'changelog']
-        ce.obj.opener = ce.obj._realopener = destrepo.svfs
+        ce.obj.opener = ce.obj._inner.opener = destrepo.svfs
     if b'obsstore' in srcfilecache:
         destfilecache[b'obsstore'] = ce = srcfilecache[b'obsstore']
         ce.obj.svfs = destrepo.svfs
--- a/mercurial/repoview.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/repoview.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,11 +15,6 @@
     hex,
     nullrev,
 )
-from .pycompat import (
-    delattr,
-    getattr,
-    setattr,
-)
 from . import (
     error,
     obsolete,
@@ -296,13 +291,12 @@
         This returns a version of 'revs' to be used thereafter by the caller.
         In particular, if revs is an iterator, it is converted into a set.
         """
-        safehasattr = util.safehasattr
-        if safehasattr(revs, '__next__'):
+        if hasattr(revs, '__next__'):
             # Note that inspect.isgenerator() is not true for iterators,
             revs = set(revs)
 
         filteredrevs = self.filteredrevs
-        if safehasattr(revs, 'first'):  # smartset
+        if hasattr(revs, 'first'):  # smartset
             offenders = revs & filteredrevs
         else:
             offenders = filteredrevs.intersection(revs)
@@ -422,7 +416,7 @@
         with util.timedcm('repo filter for %s', self.filtername):
             revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
         cl = self._clcache
-        newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
+        newkey = (unfilen, unfinode, hash(revs), unfichangelog.is_delaying)
         # if cl.index is not unfiindex, unfi.changelog would be
         # recreated, and our clcache refers to garbage object
         if cl is not None and (
--- a/mercurial/revlog.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revlog.py	Tue Nov 07 15:21:11 2023 +0100
@@ -32,7 +32,6 @@
     wdirrev,
 )
 from .i18n import _
-from .pycompat import getattr
 from .revlogutils.constants import (
     ALL_KINDS,
     CHANGELOGV2,
@@ -167,7 +166,7 @@
 # We also consider we have a "fast" implementation in "pure" python because
 # people using pure don't really have performance consideration (and a
 # wheelbarrow of other slowness source)
-HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
+HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or hasattr(
     parsers, 'BaseIndexObject'
 )
 
@@ -214,7 +213,7 @@
     return index, cache
 
 
-if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+if hasattr(parsers, 'parse_index_devel_nodemap'):
 
     def parse_index_v1_nodemap(data, inline):
         index, cache = parsers.parse_index_devel_nodemap(data, inline)
@@ -242,6 +241,993 @@
 hexdigits = b'0123456789abcdefABCDEF'
 
 
+class _Config:
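+    # ``copy`` relies on the subclasses below being plain (non-slots) attr.s
+    # classes, so every configured field lives in ``__dict__`` and can be fed
+    # back to the constructor as keyword arguments.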
+    def copy(self):
+        return self.__class__(**self.__dict__)
+
+
+@attr.s()
+class FeatureConfig(_Config):
+    """Hold configuration values about the available revlog features"""
+
+    # the default compression engine
+    compression_engine = attr.ib(default=b'zlib')
+    # compression engines options
+    compression_engine_options = attr.ib(default=attr.Factory(dict))
+
+    # can we use censor on this revlog
+    censorable = attr.ib(default=False)
+    # does this revlog use the "side data" feature
+    has_side_data = attr.ib(default=False)
+    # might remove rank configuration once the computation has no impact
+    compute_rank = attr.ib(default=False)
+    # parent order is supposed to be semantically irrelevant, so we
+    # normally resort parents to ensure that the first parent is non-null,
+    # if there is a non-null parent at all.
+    # filelog abuses the parent order as flag to mark some instances of
+    # meta-encoded files, so allow it to disable this behavior.
+    canonical_parent_order = attr.ib(default=False)
+    # can ellipsis commit be used
+    enable_ellipsis = attr.ib(default=False)
+
+    def copy(self):
+        new = super().copy()
+        new.compression_engine_options = self.compression_engine_options.copy()
+        return new
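+
+    # For illustration (not part of the change above): the override keeps
+    # copies from sharing the mutable options dict:
+    #
+    #     a = FeatureConfig()
+    #     b = a.copy()
+    #     b.compression_engine_options[b'zlib.level'] = 9
+    #     assert a.compression_engine_options == {}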
+
+
+@attr.s()
+class DataConfig(_Config):
+    """Hold configuration value about how the revlog data are read"""
+
+    # should we try to open the "pending" version of the revlog
+    try_pending = attr.ib(default=False)
+    # should we try to open the "splitted" version of the revlog
+    try_split = attr.ib(default=False)
+    #  When True, indexfile should be opened with checkambig=True at writing,
+    #  to avoid file stat ambiguity.
+    check_ambig = attr.ib(default=False)
+
+    # If true, use mmap instead of reading to deal with large index
+    mmap_large_index = attr.ib(default=False)
+    # how much data is large
+    mmap_index_threshold = attr.ib(default=None)
+    # How much data to read and cache into the raw revlog data cache.
+    chunk_cache_size = attr.ib(default=65536)
+
+    # The size of the uncompressed cache compared to the largest revision seen.
+    uncompressed_cache_factor = attr.ib(default=None)
+
+    # The number of chunks cached
+    uncompressed_cache_count = attr.ib(default=None)
+
+    # Allow sparse reading of the revlog data
+    with_sparse_read = attr.ib(default=False)
+    # minimal density of a sparse read chunk
+    sr_density_threshold = attr.ib(default=0.50)
+    # minimal size of data we skip when performing sparse read
+    sr_min_gap_size = attr.ib(default=262144)
+
+    # are deltas encoded against arbitrary bases.
+    generaldelta = attr.ib(default=False)
+
+
+@attr.s()
+class DeltaConfig(_Config):
+    """Hold configuration value about how new delta are computed
+
+    Some attributes are duplicated from DataConfig to help havign each object
+    self contained.
+    """
+
+    # can delta be encoded against arbitrary bases.
+    general_delta = attr.ib(default=False)
+    # Allow sparse writing of the revlog data
+    sparse_revlog = attr.ib(default=False)
+    # maximum length of a delta chain
+    max_chain_len = attr.ib(default=None)
+    # Maximum distance between delta chain base start and end
+    max_deltachain_span = attr.ib(default=-1)
+    # If `upper_bound_comp` is not None, this is the expected maximal gain from
+    # compression for the data content.
+    upper_bound_comp = attr.ib(default=None)
+    # Should we try a delta against both parents
+    delta_both_parents = attr.ib(default=True)
+    # Test delta base candidate groups in chunks of this maximal size.
+    candidate_group_chunk_size = attr.ib(default=0)
+    # Should we display debug information about delta computation
+    debug_delta = attr.ib(default=False)
+    # trust incoming delta by default
+    lazy_delta = attr.ib(default=True)
+    # trust the base of incoming delta by default
+    lazy_delta_base = attr.ib(default=False)
+
+
+class _InnerRevlog:
+    """An inner layer of the revlog object
+
+    That layer exist to be able to delegate some operation to Rust, its
+    boundaries are arbitrary and based on what we can delegate to Rust.
+    """
+
+    def __init__(
+        self,
+        opener,
+        index,
+        index_file,
+        data_file,
+        sidedata_file,
+        inline,
+        data_config,
+        delta_config,
+        feature_config,
+        chunk_cache,
+        default_compression_header,
+    ):
+        self.opener = opener
+        self.index = index
+
+        self.__index_file = index_file
+        self.data_file = data_file
+        self.sidedata_file = sidedata_file
+        self.inline = inline
+        self.data_config = data_config
+        self.delta_config = delta_config
+        self.feature_config = feature_config
+
+        # used during diverted write.
+        self._orig_index_file = None
+
+        self._default_compression_header = default_compression_header
+
+        # index
+
+        # 3-tuple of file handles being used for active writing.
+        self._writinghandles = None
+
+        self._segmentfile = randomaccessfile.randomaccessfile(
+            self.opener,
+            (self.index_file if self.inline else self.data_file),
+            self.data_config.chunk_cache_size,
+            chunk_cache,
+        )
+        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
+            self.opener,
+            self.sidedata_file,
+            self.data_config.chunk_cache_size,
+        )
+
+        # revlog header -> revlog compressor
+        self._decompressors = {}
+        # 3-tuple of (node, rev, text) for a raw revision.
+        self._revisioncache = None
+
+        # cache some uncompressed chunks
+        # rev → uncompressed_chunk
+        #
+        # the max cost is dynamically updated to be proportional to the
+        # size of the revisions we actually encounter.
+        self._uncompressed_chunk_cache = None
+        if self.data_config.uncompressed_cache_factor is not None:
+            self._uncompressed_chunk_cache = util.lrucachedict(
+                self.data_config.uncompressed_cache_count,
+                maxcost=65536,  # some arbitrary initial value
+            )
+
+        self._delay_buffer = None
+
+    @property
+    def index_file(self):
+        return self.__index_file
+
+    @index_file.setter
+    def index_file(self, new_index_file):
+        self.__index_file = new_index_file
+        if self.inline:
+            self._segmentfile.filename = new_index_file
+
+    def __len__(self):
+        return len(self.index)
+
+    def clear_cache(self):
+        assert not self.is_delaying
+        self._revisioncache = None
+        if self._uncompressed_chunk_cache is not None:
+            self._uncompressed_chunk_cache.clear()
+        self._segmentfile.clear_cache()
+        self._segmentfile_sidedata.clear_cache()
+
+    @property
+    def canonical_index_file(self):
+        if self._orig_index_file is not None:
+            return self._orig_index_file
+        return self.index_file
+
+    @property
+    def is_delaying(self):
+        """is the revlog is currently delaying the visibility of written data?
+
+        The delaying mechanism can be either in-memory or written on disk in a
+        side-file."""
+        return (self._delay_buffer is not None) or (
+            self._orig_index_file is not None
+        )
+
+    # Derived from index values.
+
+    def start(self, rev):
+        """the offset of the data chunk for this revision"""
+        return int(self.index[rev][0] >> 16)
+
+    def length(self, rev):
+        """the length of the data chunk for this revision"""
+        return self.index[rev][1]
+
+    def end(self, rev):
+        """the end of the data chunk for this revision"""
+        return self.start(rev) + self.length(rev)
+
+    def deltaparent(self, rev):
+        """return deltaparent of the given revision"""
+        base = self.index[rev][3]
+        if base == rev:
+            return nullrev
+        elif self.delta_config.general_delta:
+            return base
+        else:
+            return rev - 1
+
+    def issnapshot(self, rev):
+        """tells whether rev is a snapshot"""
+        if not self.delta_config.sparse_revlog:
+            return self.deltaparent(rev) == nullrev
+        elif hasattr(self.index, 'issnapshot'):
+            # directly assign the method to the instance to cache both the
+            # hasattr test and the attribute access
+            self.issnapshot = self.index.issnapshot
+            return self.issnapshot(rev)
+        if rev == nullrev:
+            return True
+        entry = self.index[rev]
+        base = entry[3]
+        if base == rev:
+            return True
+        if base == nullrev:
+            return True
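+        # The recorded parents may themselves carry empty deltas; walk each
+        # parent's delta chain past zero-length entries so ``base`` is
+        # compared against the revisions that actually hold parent content.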
+        p1 = entry[5]
+        while self.length(p1) == 0:
+            b = self.deltaparent(p1)
+            if b == p1:
+                break
+            p1 = b
+        p2 = entry[6]
+        while self.length(p2) == 0:
+            b = self.deltaparent(p2)
+            if b == p2:
+                break
+            p2 = b
+        if base == p1 or base == p2:
+            return False
+        return self.issnapshot(base)
+
+    def _deltachain(self, rev, stoprev=None):
+        """Obtain the delta chain for a revision.
+
+        ``stoprev`` specifies a revision to stop at. If not specified, we
+        stop at the base of the chain.
+
+        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
+        revs in ascending order and ``stopped`` is a bool indicating whether
+        ``stoprev`` was hit.
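+
+        For instance (general-delta, hypothetical revisions): if rev 9 is a
+        delta against rev 5, itself a delta against the full snapshot rev 2,
+        then ``_deltachain(9)`` returns ``([2, 5, 9], False)`` while
+        ``_deltachain(9, stoprev=5)`` returns ``([9], True)``.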
+        """
+        generaldelta = self.delta_config.general_delta
+        # Try C implementation.
+        try:
+            return self.index.deltachain(rev, stoprev, generaldelta)
+        except AttributeError:
+            pass
+
+        chain = []
+
+        # Alias to prevent attribute lookup in tight loop.
+        index = self.index
+
+        iterrev = rev
+        e = index[iterrev]
+        while iterrev != e[3] and iterrev != stoprev:
+            chain.append(iterrev)
+            if generaldelta:
+                iterrev = e[3]
+            else:
+                iterrev -= 1
+            e = index[iterrev]
+
+        if iterrev == stoprev:
+            stopped = True
+        else:
+            chain.append(iterrev)
+            stopped = False
+
+        chain.reverse()
+        return chain, stopped
+
+    @util.propertycache
+    def _compressor(self):
+        engine = util.compengines[self.feature_config.compression_engine]
+        return engine.revlogcompressor(
+            self.feature_config.compression_engine_options
+        )
+
+    @util.propertycache
+    def _decompressor(self):
+        """the default decompressor"""
+        if self._default_compression_header is None:
+            return None
+        t = self._default_compression_header
+        c = self._get_decompressor(t)
+        return c.decompress
+
+    def _get_decompressor(self, t):
+        try:
+            compressor = self._decompressors[t]
+        except KeyError:
+            try:
+                engine = util.compengines.forrevlogheader(t)
+                compressor = engine.revlogcompressor(
+                    self.feature_config.compression_engine_options
+                )
+                self._decompressors[t] = compressor
+            except KeyError:
+                raise error.RevlogError(
+                    _(b'unknown compression type %s') % binascii.hexlify(t)
+                )
+        return compressor
+
+    def compress(self, data):
+        """Generate a possibly-compressed representation of data."""
+        if not data:
+            return b'', data
+
+        compressed = self._compressor.compress(data)
+
+        if compressed:
+            # The revlog compressor added the header in the returned data.
+            return b'', compressed
+
+        if data[0:1] == b'\0':
+            return b'', data
+        return b'u', data
+
+    def decompress(self, data):
+        """Decompress a revlog chunk.
+
+        The chunk is expected to begin with a header identifying the
+        format type so it can be routed to an appropriate decompressor.
+        """
+        if not data:
+            return data
+
+        # Revlogs are read much more frequently than they are written and many
+        # chunks only take microseconds to decompress, so performance is
+        # important here.
+        #
+        # We can make a few assumptions about revlogs:
+        #
+        # 1) the majority of chunks will be compressed (as opposed to inline
+        #    raw data).
+        # 2) decompressing *any* data will likely be at least 10x slower than
+        #    returning raw inline data.
+        # 3) we want to prioritize common and officially supported compression
+        #    engines
+        #
+        # It follows that we want to optimize for "decompress compressed data
+        # when encoded with common and officially supported compression engines"
+        # case over "raw data" and "data encoded by less common or non-official
+        # compression engines." That is why we have the inline lookup first
+        # followed by the compengines lookup.
+        #
+        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
+        # compressed chunks. And this matters for changelog and manifest reads.
+        t = data[0:1]
+
+        if t == b'x':
+            try:
+                return _zlibdecompress(data)
+            except zlib.error as e:
+                raise error.RevlogError(
+                    _(b'revlog decompress error: %s')
+                    % stringutil.forcebytestr(e)
+                )
+        # '\0' is more common than 'u' so it goes first.
+        elif t == b'\0':
+            return data
+        elif t == b'u':
+            return util.buffer(data, 1)
+
+        compressor = self._get_decompressor(t)
+
+        return compressor.decompress(data)
+
+    @contextlib.contextmanager
+    def reading(self):
+        """Context manager that keeps data and sidedata files open for reading"""
+        if len(self.index) == 0:
+            yield  # nothing to be read
+        else:
+            with self._segmentfile.reading():
+                with self._segmentfile_sidedata.reading():
+                    yield
+
+    @property
+    def is_writing(self):
+        """True is a writing context is open"""
+        return self._writinghandles is not None
+
+    @property
+    def is_open(self):
+        """True if any file handle is being held
+
+        Used for asserts and debugging in the Python code"""
+        return self._segmentfile.is_open or self._segmentfile_sidedata.is_open
+
+    @contextlib.contextmanager
+    def writing(self, transaction, data_end=None, sidedata_end=None):
+        """Open the revlog files for writing
+
+        Adding content to a revlog should be done within such a context.
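+
+        A minimal usage sketch (assuming ``inner`` is an ``_InnerRevlog`` and
+        ``tr`` an open transaction; ``write_entry`` arguments elided)::
+
+            with inner.writing(tr):
+                inner.write_entry(tr, ...)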
+        """
+        if self.is_writing:
+            yield
+        else:
+            ifh = dfh = sdfh = None
+            try:
+                r = len(self.index)
+                # opening the data file.
+                dsize = 0
+                if r:
+                    dsize = self.end(r - 1)
+                dfh = None
+                if not self.inline:
+                    try:
+                        dfh = self.opener(self.data_file, mode=b"r+")
+                        if data_end is None:
+                            dfh.seek(0, os.SEEK_END)
+                        else:
+                            dfh.seek(data_end, os.SEEK_SET)
+                    except FileNotFoundError:
+                        dfh = self.opener(self.data_file, mode=b"w+")
+                    transaction.add(self.data_file, dsize)
+                if self.sidedata_file is not None:
+                    assert sidedata_end is not None
+                    # revlog-v2 is never inline; this helps Pytype
+                    assert dfh is not None
+                    try:
+                        sdfh = self.opener(self.sidedata_file, mode=b"r+")
+                        dfh.seek(sidedata_end, os.SEEK_SET)
+                    except FileNotFoundError:
+                        sdfh = self.opener(self.sidedata_file, mode=b"w+")
+                    transaction.add(self.sidedata_file, sidedata_end)
+
+                # opening the index file.
+                isize = r * self.index.entry_size
+                ifh = self.__index_write_fp()
+                if self.inline:
+                    transaction.add(self.index_file, dsize + isize)
+                else:
+                    transaction.add(self.index_file, isize)
+                # exposing all file handle for writing.
+                self._writinghandles = (ifh, dfh, sdfh)
+                self._segmentfile.writing_handle = ifh if self.inline else dfh
+                self._segmentfile_sidedata.writing_handle = sdfh
+                yield
+            finally:
+                self._writinghandles = None
+                self._segmentfile.writing_handle = None
+                self._segmentfile_sidedata.writing_handle = None
+                if dfh is not None:
+                    dfh.close()
+                if sdfh is not None:
+                    sdfh.close()
+                # close the index file last, to avoid exposing references to
+                # potentially unflushed data content.
+                if ifh is not None:
+                    ifh.close()
+
+    def __index_write_fp(self, index_end=None):
+        """internal method to open the index file for writing
+
+        You should not use this directly; use `writing` instead
+        """
+        try:
+            if self._delay_buffer is None:
+                f = self.opener(
+                    self.index_file,
+                    mode=b"r+",
+                    checkambig=self.data_config.check_ambig,
+                )
+            else:
+                # check_ambig affects the way we open the file for writing;
+                # however, here we do not actually open a file, as writes
+                # will be appended to a delay_buffer. So check_ambig is
+                # meaningless and unneeded here.
+                f = randomaccessfile.appender(
+                    self.opener, self.index_file, b"r+", self._delay_buffer
+                )
+            if index_end is None:
+                f.seek(0, os.SEEK_END)
+            else:
+                f.seek(index_end, os.SEEK_SET)
+            return f
+        except FileNotFoundError:
+            if self._delay_buffer is None:
+                return self.opener(
+                    self.index_file,
+                    mode=b"w+",
+                    checkambig=self.data_config.check_ambig,
+                )
+            else:
+                return randomaccessfile.appender(
+                    self.opener, self.index_file, b"w+", self._delay_buffer
+                )
+
+    def __index_new_fp(self):
+        """internal method to create a new index file for writing
+
+        You should not use this unless you are upgrading from an inline revlog
+        """
+        return self.opener(
+            self.index_file,
+            mode=b"w",
+            checkambig=self.data_config.check_ambig,
+            atomictemp=True,
+        )
+
+    def split_inline(self, tr, header, new_index_file_path=None):
+        """split the data of an inline revlog into an index and a data file"""
+        existing_handles = False
+        if self._writinghandles is not None:
+            existing_handles = True
+            fp = self._writinghandles[0]
+            fp.flush()
+            fp.close()
+            # We can't use the cached file handle after close(). So prevent
+            # its usage.
+            self._writinghandles = None
+            self._segmentfile.writing_handle = None
+            # No need to deal with the sidedata writing handle, as it is only
+            # relevant for revlog-v2, which is never inline and thus never
+            # reaches this code
+
+        new_dfh = self.opener(self.data_file, mode=b"w+")
+        new_dfh.truncate(0)  # drop any potentially existing data
+        try:
+            with self.reading():
+                for r in range(len(self.index)):
+                    new_dfh.write(self.get_segment_for_revs(r, r)[1])
+                new_dfh.flush()
+
+            if new_index_file_path is not None:
+                self.index_file = new_index_file_path
+            with self.__index_new_fp() as fp:
+                self.inline = False
+                for i in range(len(self.index)):
+                    e = self.index.entry_binary(i)
+                    if i == 0:
+                        packed_header = self.index.pack_header(header)
+                        e = packed_header + e
+                    fp.write(e)
+
+                # If we don't use side-write, the temp file replaces the real
+                # index when we exit the context manager
+
+            self._segmentfile = randomaccessfile.randomaccessfile(
+                self.opener,
+                self.data_file,
+                self.data_config.chunk_cache_size,
+            )
+
+            if existing_handles:
+                # switched from inline to conventional; reopen the index
+                ifh = self.__index_write_fp()
+                self._writinghandles = (ifh, new_dfh, None)
+                self._segmentfile.writing_handle = new_dfh
+                new_dfh = None
+                # No need to deal with the sidedata writing handle, as it is
+                # only relevant for revlog-v2, which is never inline and thus
+                # never reaches this code
+        finally:
+            if new_dfh is not None:
+                new_dfh.close()
+        return self.index_file
+
+    def get_segment_for_revs(self, startrev, endrev):
+        """Obtain a segment of raw data corresponding to a range of revisions.
+
+        Accepts the start and end revisions.
+
+        Requests for data may be satisfied by a cache.
+
+        Returns a 2-tuple of (offset, data) for the requested range of
+        revisions. Offset is the integer offset from the beginning of the
+        revlog and data is a str or buffer of the raw byte data.
+
+        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
+        to determine where each revision's data begins and ends.
+
+        API: we should consider making this a private part of the InnerRevlog
+        at some point.
+        """
+        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
+        # (functions are expensive).
+        index = self.index
+        istart = index[startrev]
+        start = int(istart[0] >> 16)
+        if startrev == endrev:
+            end = start + istart[1]
+        else:
+            iend = index[endrev]
+            end = int(iend[0] >> 16) + iend[1]
+
+        if self.inline:
+            start += (startrev + 1) * self.index.entry_size
+            end += (endrev + 1) * self.index.entry_size
+        length = end - start
+
+        return start, self._segmentfile.read_chunk(start, length)
+
+    def _chunk(self, rev):
+        """Obtain a single decompressed chunk for a revision.
+
+        Accepts an integer revision.
+
+        Returns a str holding uncompressed data for the requested revision.
+        """
+        if self._uncompressed_chunk_cache is not None:
+            uncomp = self._uncompressed_chunk_cache.get(rev)
+            if uncomp is not None:
+                return uncomp
+
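+        # The index records how each chunk is stored: COMP_MODE_PLAIN is raw
+        # data, COMP_MODE_DEFAULT uses the revlog's default decompressor
+        # (from the docket), and COMP_MODE_INLINE carries its own header byte.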
+        compression_mode = self.index[rev][10]
+        data = self.get_segment_for_revs(rev, rev)[1]
+        if compression_mode == COMP_MODE_PLAIN:
+            uncomp = data
+        elif compression_mode == COMP_MODE_DEFAULT:
+            uncomp = self._decompressor(data)
+        elif compression_mode == COMP_MODE_INLINE:
+            uncomp = self.decompress(data)
+        else:
+            msg = b'unknown compression mode %d'
+            msg %= compression_mode
+            raise error.RevlogError(msg)
+        if self._uncompressed_chunk_cache is not None:
+            self._uncompressed_chunk_cache.insert(rev, uncomp, cost=len(uncomp))
+        return uncomp
+
+    def _chunks(self, revs, targetsize=None):
+        """Obtain decompressed chunks for the specified revisions.
+
+        Accepts an iterable of numeric revisions that are assumed to be in
+        ascending order.
+
+        This function is similar to calling ``self._chunk()`` multiple times,
+        but is faster.
+
+        Returns a list with decompressed data for each requested revision.
+        """
+        if not revs:
+            return []
+        start = self.start
+        length = self.length
+        inline = self.inline
+        iosize = self.index.entry_size
+        buffer = util.buffer
+
+        fetched_revs = []
+        fadd = fetched_revs.append
+
+        chunks = []
+        ladd = chunks.append
+
+        if self._uncompressed_chunk_cache is None:
+            fetched_revs = revs
+        else:
+            for rev in revs:
+                cached_value = self._uncompressed_chunk_cache.get(rev)
+                if cached_value is None:
+                    fadd(rev)
+                else:
+                    ladd((rev, cached_value))
+
+        if not fetched_revs:
+            slicedchunks = ()
+        elif not self.data_config.with_sparse_read:
+            slicedchunks = (fetched_revs,)
+        else:
+            slicedchunks = deltautil.slicechunk(
+                self,
+                fetched_revs,
+                targetsize=targetsize,
+            )
+
+        for revschunk in slicedchunks:
+            firstrev = revschunk[0]
+            # Skip trailing revisions with empty diff
+            for lastrev in revschunk[::-1]:
+                if length(lastrev) != 0:
+                    break
+
+            try:
+                offset, data = self.get_segment_for_revs(firstrev, lastrev)
+            except OverflowError:
+                # issue4215 - we can't cache a run of chunks greater than
+                # 2G on Windows
+                for rev in revschunk:
+                    ladd((rev, self._chunk(rev)))
+                # `offset`/`data` were never set; skip the segment path below
+                continue
+
+            decomp = self.decompress
+            # self._decompressor might be None, but will not be used in that case
+            def_decomp = self._decompressor
+            for rev in revschunk:
+                chunkstart = start(rev)
+                if inline:
+                    chunkstart += (rev + 1) * iosize
+                chunklength = length(rev)
+                comp_mode = self.index[rev][10]
+                c = buffer(data, chunkstart - offset, chunklength)
+                if comp_mode == COMP_MODE_PLAIN:
+                    pass  # plain chunks are used as-is
+                elif comp_mode == COMP_MODE_INLINE:
+                    c = decomp(c)
+                elif comp_mode == COMP_MODE_DEFAULT:
+                    c = def_decomp(c)
+                else:
+                    msg = b'unknown compression mode %d'
+                    msg %= comp_mode
+                    raise error.RevlogError(msg)
+                ladd((rev, c))
+                if self._uncompressed_chunk_cache is not None:
+                    self._uncompressed_chunk_cache.insert(rev, c, len(c))
+
+        chunks.sort()
+        return [x[1] for x in chunks]
+
+    def raw_text(self, node, rev):
+        """return the possibly unvalidated rawtext for a revision
+
+        returns (rev, rawtext, validated)
+        """
+
+        # revision in the cache (could be useful to apply delta)
+        cachedrev = None
+        # An intermediate text to apply deltas to
+        basetext = None
+
+        # Check if we have the entry in cache
+        # The cache entry looks like (node, rev, rawtext)
+        if self._revisioncache:
+            cachedrev = self._revisioncache[1]
+
+        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
+        if stopped:
+            basetext = self._revisioncache[2]
+
+        # drop cache to save memory, the caller is expected to
+        # update self._inner._revisioncache after validating the text
+        self._revisioncache = None
+
+        targetsize = None
+        rawsize = self.index[rev][2]
+        if 0 <= rawsize:
+            targetsize = 4 * rawsize
+
+        if self._uncompressed_chunk_cache is not None:
+            # dynamically update the uncompressed_chunk_cache size to the
+            # largest revision we saw in this revlog.
+            factor = self.data_config.uncompressed_cache_factor
+            candidate_size = rawsize * factor
+            if candidate_size > self._uncompressed_chunk_cache.maxcost:
+                self._uncompressed_chunk_cache.maxcost = candidate_size
+
+        bins = self._chunks(chain, targetsize=targetsize)
+        if basetext is None:
+            basetext = bytes(bins[0])
+            bins = bins[1:]
+
+        rawtext = mdiff.patches(basetext, bins)
+        del basetext  # let us have a chance to free memory early
+        return (rev, rawtext, False)
+
+    def sidedata(self, rev, sidedata_end):
+        """Return the sidedata for a given revision number."""
+        index_entry = self.index[rev]
+        sidedata_offset = index_entry[8]
+        sidedata_size = index_entry[9]
+
+        if self.inline:
+            sidedata_offset += self.index.entry_size * (1 + rev)
+        if sidedata_size == 0:
+            return {}
+
+        if sidedata_end < sidedata_offset + sidedata_size:
+            filename = self.sidedata_file
+            end = sidedata_end
+            offset = sidedata_offset
+            length = sidedata_size
+            m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
+            raise error.RevlogError(m)
+
+        comp_segment = self._segmentfile_sidedata.read_chunk(
+            sidedata_offset, sidedata_size
+        )
+
+        comp = self.index[rev][11]
+        if comp == COMP_MODE_PLAIN:
+            segment = comp_segment
+        elif comp == COMP_MODE_DEFAULT:
+            segment = self._decompressor(comp_segment)
+        elif comp == COMP_MODE_INLINE:
+            segment = self.decompress(comp_segment)
+        else:
+            msg = b'unknown compression mode %d'
+            msg %= comp
+            raise error.RevlogError(msg)
+
+        sidedata = sidedatautil.deserialize_sidedata(segment)
+        return sidedata
+
+    def write_entry(
+        self,
+        transaction,
+        entry,
+        data,
+        link,
+        offset,
+        sidedata,
+        sidedata_offset,
+        index_end,
+        data_end,
+        sidedata_end,
+    ):
+        # Files opened in a+ mode have inconsistent behavior on various
+        # platforms. Windows requires that a file positioning call be made
+        # when the file handle transitions between reads and writes. See
+        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
+        # platforms, Python or the platform itself can be buggy. Some versions
+        # of Solaris have been observed to not append at the end of the file
+        # if the file was seeked to before the end. See issue4943 for more.
+        #
+        # We work around this issue by inserting a seek() before writing.
+        # Note: This is likely not necessary on Python 3. However, because
+        # the file handle is reused for reads and may be seeked there, we need
+        # to be careful before changing this.
+        if self._writinghandles is None:
+            msg = b'adding revision outside `revlog._writing` context'
+            raise error.ProgrammingError(msg)
+        ifh, dfh, sdfh = self._writinghandles
+        if index_end is None:
+            ifh.seek(0, os.SEEK_END)
+        else:
+            ifh.seek(index_end, os.SEEK_SET)
+        if dfh:
+            if data_end is None:
+                dfh.seek(0, os.SEEK_END)
+            else:
+                dfh.seek(data_end, os.SEEK_SET)
+        if sdfh:
+            sdfh.seek(sidedata_end, os.SEEK_SET)
+
+        curr = len(self.index) - 1
+        if not self.inline:
+            transaction.add(self.data_file, offset)
+            if self.sidedata_file:
+                transaction.add(self.sidedata_file, sidedata_offset)
+            transaction.add(self.canonical_index_file, curr * len(entry))
+            if data[0]:
+                dfh.write(data[0])
+            dfh.write(data[1])
+            if sidedata:
+                sdfh.write(sidedata)
+            if self._delay_buffer is None:
+                ifh.write(entry)
+            else:
+                self._delay_buffer.append(entry)
+        else:
+            offset += curr * self.index.entry_size
+            transaction.add(self.canonical_index_file, offset)
+            assert not sidedata
+            if self._delay_buffer is None:
+                ifh.write(entry)
+                ifh.write(data[0])
+                ifh.write(data[1])
+            else:
+                self._delay_buffer.append(entry)
+                self._delay_buffer.append(data[0])
+                self._delay_buffer.append(data[1])
+        return (
+            ifh.tell(),
+            dfh.tell() if dfh else None,
+            sdfh.tell() if sdfh else None,
+        )
+
+    def _divert_index(self):
+        return self.index_file + b'.a'
+
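+    # Two delayed-visibility strategies are implemented below: "divert" (an
+    # empty revlog writes its index to a separate `.a` file that
+    # ``finalize_pending`` renames into place) and "delay" (a non-empty
+    # revlog buffers new index entries in ``_delay_buffer`` until they are
+    # flushed). At most one of the two is active at a time.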
+    def delay(self):
+        assert not self.is_open
+        if self._delay_buffer is not None or self._orig_index_file is not None:
+            # delay or divert already in place
+            return None
+        elif len(self.index) == 0:
+            self._orig_index_file = self.index_file
+            self.index_file = self._divert_index()
+            self._segmentfile.filename = self.index_file
+            assert self._orig_index_file is not None
+            assert self.index_file is not None
+            if self.opener.exists(self.index_file):
+                self.opener.unlink(self.index_file)
+            return self.index_file
+        else:
+            self._segmentfile._delay_buffer = self._delay_buffer = []
+            return None
+
+    def write_pending(self):
+        assert not self.is_open
+        if self._orig_index_file is not None:
+            return None, True
+        any_pending = False
+        pending_index_file = self._divert_index()
+        if self.opener.exists(pending_index_file):
+            self.opener.unlink(pending_index_file)
+        util.copyfile(
+            self.opener.join(self.index_file),
+            self.opener.join(pending_index_file),
+        )
+        if self._delay_buffer:
+            with self.opener(pending_index_file, b'r+') as ifh:
+                ifh.seek(0, os.SEEK_END)
+                ifh.write(b"".join(self._delay_buffer))
+            any_pending = True
+        self._segmentfile._delay_buffer = self._delay_buffer = None
+        self._orig_index_file = self.index_file
+        self.index_file = pending_index_file
+        self._segmentfile.filename = self.index_file
+        return self.index_file, any_pending
+
+    def finalize_pending(self):
+        assert not self.is_open
+
+        delay = self._delay_buffer is not None
+        divert = self._orig_index_file is not None
+
+        if delay and divert:
+            assert False, "unreachable"
+        elif delay:
+            if self._delay_buffer:
+                with self.opener(self.index_file, b'r+') as ifh:
+                    ifh.seek(0, os.SEEK_END)
+                    ifh.write(b"".join(self._delay_buffer))
+            self._segmentfile._delay_buffer = self._delay_buffer = None
+        elif divert:
+            if self.opener.exists(self.index_file):
+                self.opener.rename(
+                    self.index_file,
+                    self._orig_index_file,
+                    checkambig=True,
+                )
+            self.index_file = self._orig_index_file
+            self._orig_index_file = None
+            self._segmentfile.filename = self.index_file
+        else:
+            msg = b"not delay or divert found on this revlog"
+            raise error.ProgrammingError(msg)
+        return self.canonical_index_file
+
+
 class revlog:
     """
     the underlying revision storage object
@@ -292,6 +1278,7 @@
 
     @staticmethod
     def is_inline_index(header_bytes):
+        """Determine if a revlog is inline from the initial bytes of the index"""
         if len(header_bytes) == 0:
             return True
 
@@ -332,7 +1319,6 @@
         that test, debug, or performance measurement code might not set this to
         accurate value.
         """
-        self.upperboundcomp = upperboundcomp
 
         self.radix = radix
 
@@ -351,62 +1337,232 @@
         assert target[0] in ALL_KINDS
         assert len(target) == 2
         self.target = target
-        #  When True, indexfile is opened with checkambig=True at writing, to
-        #  avoid file stat ambiguity.
-        self._checkambig = checkambig
-        self._mmaplargeindex = mmaplargeindex
-        self._censorable = censorable
-        # 3-tuple of (node, rev, text) for a raw revision.
-        self._revisioncache = None
+        if b'feature-config' in self.opener.options:
+            self.feature_config = self.opener.options[b'feature-config'].copy()
+        else:
+            self.feature_config = FeatureConfig()
+        self.feature_config.censorable = censorable
+        self.feature_config.canonical_parent_order = canonical_parent_order
+        if b'data-config' in self.opener.options:
+            self.data_config = self.opener.options[b'data-config'].copy()
+        else:
+            self.data_config = DataConfig()
+        self.data_config.check_ambig = checkambig
+        self.data_config.mmap_large_index = mmaplargeindex
+        if b'delta-config' in self.opener.options:
+            self.delta_config = self.opener.options[b'delta-config'].copy()
+        else:
+            self.delta_config = DeltaConfig()
+        self.delta_config.upper_bound_comp = upperboundcomp
+
         # Maps rev to chain base rev.
         self._chainbasecache = util.lrucachedict(100)
-        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
-        self._chunkcache = (0, b'')
-        # How much data to read and cache into the raw revlog data cache.
-        self._chunkcachesize = 65536
-        self._maxchainlen = None
-        self._deltabothparents = True
-        self._candidate_group_chunk_size = 0
-        self._debug_delta = False
+
         self.index = None
         self._docket = None
         self._nodemap_docket = None
         # Mapping of partial identifiers to full nodes.
         self._pcache = {}
-        # Mapping of revision integer to full node.
-        self._compengine = b'zlib'
-        self._compengineopts = {}
-        self._maxdeltachainspan = -1
-        self._withsparseread = False
-        self._sparserevlog = False
-        self.hassidedata = False
-        self._srdensitythreshold = 0.50
-        self._srmingapsize = 262144
 
         # other optional features
 
-        # might remove rank configuration once the computation has no impact
-        self._compute_rank = False
-
         # Make copy of flag processors so each revlog instance can support
         # custom flags.
         self._flagprocessors = dict(flagutil.flagprocessors)
-
-        # 3-tuple of file handles being used for active writing.
-        self._writinghandles = None
         # prevent nesting of addgroup
         self._adding_group = None
 
-        self._loadindex()
-
+        chunk_cache = self._loadindex()
+        self._load_inner(chunk_cache)
         self._concurrencychecker = concurrencychecker
 
-        # parent order is supposed to be semantically irrelevant, so we
-        # normally resort parents to ensure that the first parent is non-null,
-        # if there is a non-null parent at all.
-        # filelog abuses the parent order as flag to mark some instances of
-        # meta-encoded files, so allow it to disable this behavior.
-        self.canonical_parent_order = canonical_parent_order
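+    # The properties below are temporary compatibility proxies: attributes
+    # that used to live directly on the revlog now delegate to the config
+    # objects and emit a deprecation warning, giving extensions a migration
+    # window. For example, code reading ``rl._generaldelta`` should move to
+    # ``rl.delta_config.general_delta``.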
+    @property
+    def _generaldelta(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.general_delta", b"6.6", stacklevel=2
+        )
+        return self.delta_config.general_delta
+
+    @property
+    def _checkambig(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.data_config.checkambig", b"6.6", stacklevel=2
+        )
+        return self.data_config.check_ambig
+
+    @property
+    def _mmaplargeindex(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.data_config.mmap_large_index", b"6.6", stacklevel=2
+        )
+        return self.data_config.mmap_large_index
+
+    @property
+    def _censorable(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.feature_config.censorable", b"6.6", stacklevel=2
+        )
+        return self.feature_config.censorable
+
+    @property
+    def _chunkcachesize(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.data_config.chunk_cache_size", b"6.6", stacklevel=2
+        )
+        return self.data_config.chunk_cache_size
+
+    @property
+    def _maxchainlen(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.max_chain_len", b"6.6", stacklevel=2
+        )
+        return self.delta_config.max_chain_len
+
+    @property
+    def _deltabothparents(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.delta_both_parents", b"6.6", stacklevel=2
+        )
+        return self.delta_config.delta_both_parents
+
+    @property
+    def _candidate_group_chunk_size(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.candidate_group_chunk_size",
+            b"6.6",
+            stacklevel=2,
+        )
+        return self.delta_config.candidate_group_chunk_size
+
+    @property
+    def _debug_delta(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.debug_delta", b"6.6", stacklevel=2
+        )
+        return self.delta_config.debug_delta
+
+    @property
+    def _compengine(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.feature_config.compression_engine",
+            b"6.6",
+            stacklevel=2,
+        )
+        return self.feature_config.compression_engine
+
+    @property
+    def upperboundcomp(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.upper_bound_comp",
+            b"6.6",
+            stacklevel=2,
+        )
+        return self.delta_config.upper_bound_comp
+
+    @property
+    def _compengineopts(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.feature_config.compression_engine_options",
+            b"6.6",
+            stacklevel=2,
+        )
+        return self.feature_config.compression_engine_options
+
+    @property
+    def _maxdeltachainspan(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.max_deltachain_span", b"6.6", stacklevel=2
+        )
+        return self.delta_config.max_deltachain_span
+
+    @property
+    def _withsparseread(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.data_config.with_sparse_read", b"6.6", stacklevel=2
+        )
+        return self.data_config.with_sparse_read
+
+    @property
+    def _sparserevlog(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.sparse_revlog", b"6.6", stacklevel=2
+        )
+        return self.delta_config.sparse_revlog
+
+    @property
+    def hassidedata(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.feature_config.has_side_data", b"6.6", stacklevel=2
+        )
+        return self.feature_config.has_side_data
+
+    @property
+    def _srdensitythreshold(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.data_config.sr_density_threshold",
+            b"6.6",
+            stacklevel=2,
+        )
+        return self.data_config.sr_density_threshold
+
+    @property
+    def _srmingapsize(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.data_config.sr_min_gap_size", b"6.6", stacklevel=2
+        )
+        return self.data_config.sr_min_gap_size
+
+    @property
+    def _compute_rank(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.feature_config.compute_rank", b"6.6", stacklevel=2
+        )
+        return self.feature_config.compute_rank
+
+    @property
+    def canonical_parent_order(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.feature_config.canonical_parent_order",
+            b"6.6",
+            stacklevel=2,
+        )
+        return self.feature_config.canonical_parent_order
+
+    @property
+    def _lazydelta(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.lazy_delta", b"6.6", stacklevel=2
+        )
+        return self.delta_config.lazy_delta
+
+    @property
+    def _lazydeltabase(self):
+        """temporary compatibility proxy"""
+        util.nouideprecwarn(
+            b"use revlog.delta_config.lazy_delta_base", b"6.6", stacklevel=2
+        )
+        return self.delta_config.lazy_delta_base
 
     def _init_opts(self):
         """process options (from above/config) to setup associated default revlog mode
@@ -424,12 +1580,12 @@
         * force_nodemap:
             force the usage of a "development" version of the nodemap code
         """
-        mmapindexthreshold = None
         opts = self.opener.options
 
         if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
             new_header = CHANGELOGV2
-            self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
+            compute_rank = opts.get(b'changelogv2.compute-rank', True)
+            self.feature_config.compute_rank = compute_rank
         elif b'revlogv2' in opts:
             new_header = REVLOGV2
         elif b'revlogv1' in opts:
@@ -441,55 +1597,26 @@
         else:
             new_header = REVLOG_DEFAULT_VERSION
 
-        if b'chunkcachesize' in opts:
-            self._chunkcachesize = opts[b'chunkcachesize']
-        if b'maxchainlen' in opts:
-            self._maxchainlen = opts[b'maxchainlen']
-        if b'deltabothparents' in opts:
-            self._deltabothparents = opts[b'deltabothparents']
-        dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
-        if dps_cgds:
-            self._candidate_group_chunk_size = dps_cgds
-        self._lazydelta = bool(opts.get(b'lazydelta', True))
-        self._lazydeltabase = False
-        if self._lazydelta:
-            self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
-        if b'debug-delta' in opts:
-            self._debug_delta = opts[b'debug-delta']
-        if b'compengine' in opts:
-            self._compengine = opts[b'compengine']
-        if b'zlib.level' in opts:
-            self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
-        if b'zstd.level' in opts:
-            self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
-        if b'maxdeltachainspan' in opts:
-            self._maxdeltachainspan = opts[b'maxdeltachainspan']
-        if self._mmaplargeindex and b'mmapindexthreshold' in opts:
-            mmapindexthreshold = opts[b'mmapindexthreshold']
-        self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
-        withsparseread = bool(opts.get(b'with-sparse-read', False))
-        # sparse-revlog forces sparse-read
-        self._withsparseread = self._sparserevlog or withsparseread
-        if b'sparse-read-density-threshold' in opts:
-            self._srdensitythreshold = opts[b'sparse-read-density-threshold']
-        if b'sparse-read-min-gap-size' in opts:
-            self._srmingapsize = opts[b'sparse-read-min-gap-size']
-        if opts.get(b'enableellipsis'):
+        mmapindexthreshold = None
+        if self.data_config.mmap_large_index:
+            mmapindexthreshold = self.data_config.mmap_index_threshold
+        if self.feature_config.enable_ellipsis:
             self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
 
         # revlog v0 doesn't have flag processors
         for flag, processor in opts.get(b'flagprocessors', {}).items():
             flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
 
-        if self._chunkcachesize <= 0:
+        chunk_cache_size = self.data_config.chunk_cache_size
+        if chunk_cache_size <= 0:
             raise error.RevlogError(
                 _(b'revlog chunk cache size %r is not greater than 0')
-                % self._chunkcachesize
+                % chunk_cache_size
             )
-        elif self._chunkcachesize & (self._chunkcachesize - 1):
+        elif chunk_cache_size & (chunk_cache_size - 1):
             raise error.RevlogError(
                 _(b'revlog chunk cache size %r is not a power of 2')
-                % self._chunkcachesize
+                % chunk_cache_size
             )
         force_nodemap = opts.get(b'devel-force-nodemap', False)
         return new_header, mmapindexthreshold, force_nodemap
@@ -520,6 +1647,22 @@
             return b''
 
     def get_streams(self, max_linkrev, force_inline=False):
+        """return a list of streams that represent this revlog
+
+        This is used by stream-clone to do bytes-to-bytes copies of a repository.
+
+        This streams data for all revisions that refer to a changelog revision up
+        to `max_linkrev`.
+
+        If `force_inline` is set, it enforces that the stream will represent an inline revlog.
+
+        It returns a list of three-tuples:
+
+            [
+                (filename, bytes_stream, stream_size),
+                …
+            ]
+        """
         n = len(self)
         index = self.index
         while n > 0:
@@ -547,7 +1690,7 @@
         if self._inline:
 
             def get_stream():
-                with self._indexfp() as fp:
+                with self.opener(self._indexfile, mode=b"r") as fp:
                     yield None
                     size = index_size + data_size
                     if size <= 65536:
@@ -563,7 +1706,7 @@
         elif force_inline:
 
             def get_stream():
-                with self._datafp() as fp_d:
+                with self.reading():
                     yield None
 
                     for rev in range(n):
@@ -576,7 +1719,7 @@
                             header = self.index.pack_header(header)
                             idx = header + idx
                         yield idx
-                        yield self._getsegmentforrevs(rev, rev, df=fp_d)[1]
+                        yield self._inner.get_segment_for_revs(rev, rev)[1]
 
             inline_stream = get_stream()
             next(inline_stream)
@@ -586,7 +1729,7 @@
         else:
 
             def get_index_stream():
-                with self._indexfp() as fp:
+                with self.opener(self._indexfile, mode=b"r") as fp:
                     yield None
                     if index_size <= 65536:
                         yield fp.read(index_size)
@@ -651,8 +1794,10 @@
 
             features = FEATURES_BY_VERSION[self._format_version]
             self._inline = features[b'inline'](self._format_flags)
-            self._generaldelta = features[b'generaldelta'](self._format_flags)
-            self.hassidedata = features[b'sidedata']
+            self.delta_config.general_delta = features[b'generaldelta'](
+                self._format_flags
+            )
+            self.feature_config.has_side_data = features[b'sidedata']
 
             if not features[b'docket']:
                 self._indexfile = entry_point
@@ -681,7 +1826,7 @@
 
             self._inline = False
             # generaldelta implied by version 2 revlogs.
-            self._generaldelta = True
+            self.delta_config.general_delta = True
             # the logic for persistent nodemap will be dealt with within the
             # main docket, so disable it for now.
             self._nodemap_file = None
@@ -698,8 +1843,8 @@
         self.nullid = self.nodeconstants.nullid
 
         # sparse-revlog can't be on without general-delta (issue6056)
-        if not self._generaldelta:
-            self._sparserevlog = False
+        if not self.delta_config.general_delta:
+            self.delta_config.sparse_revlog = False
 
         self._storedeltachains = True
 
@@ -733,7 +1878,7 @@
             use_nodemap = (
                 not self._inline
                 and self._nodemap_file is not None
-                and util.safehasattr(index, 'update_nodemap_data')
+                and hasattr(index, 'update_nodemap_data')
             )
             if use_nodemap:
                 nodemap_data = nodemaputil.persisted_data(self)
@@ -751,21 +1896,30 @@
                 _(b"index %s is corrupted") % self.display_id
             )
         self.index = index
-        self._segmentfile = randomaccessfile.randomaccessfile(
-            self.opener,
-            (self._indexfile if self._inline else self._datafile),
-            self._chunkcachesize,
-            chunkcache,
-        )
-        self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
-            self.opener,
-            self._sidedatafile,
-            self._chunkcachesize,
-        )
         # revnum -> (chain-length, sum-delta-length)
         self._chaininfocache = util.lrucachedict(500)
-        # revlog header -> revlog compressor
-        self._decompressors = {}
+
+        return chunkcache
+
+    def _load_inner(self, chunk_cache):
+        if self._docket is None:
+            default_compression_header = None
+        else:
+            default_compression_header = self._docket.default_compression_header
+
+        self._inner = _InnerRevlog(
+            opener=self.opener,
+            index=self.index,
+            index_file=self._indexfile,
+            data_file=self._datafile,
+            sidedata_file=self._sidedatafile,
+            inline=self._inline,
+            data_config=self.data_config,
+            delta_config=self.delta_config,
+            feature_config=self.feature_config,
+            chunk_cache=chunk_cache,
+            default_compression_header=default_compression_header,
+        )
 
     def get_revlog(self):
         """simple function to mirror API of other not-really-revlog API"""
@@ -785,76 +1939,10 @@
         else:
             return self.radix
 
-    def _get_decompressor(self, t):
-        try:
-            compressor = self._decompressors[t]
-        except KeyError:
-            try:
-                engine = util.compengines.forrevlogheader(t)
-                compressor = engine.revlogcompressor(self._compengineopts)
-                self._decompressors[t] = compressor
-            except KeyError:
-                raise error.RevlogError(
-                    _(b'unknown compression type %s') % binascii.hexlify(t)
-                )
-        return compressor
-
-    @util.propertycache
-    def _compressor(self):
-        engine = util.compengines[self._compengine]
-        return engine.revlogcompressor(self._compengineopts)
-
-    @util.propertycache
-    def _decompressor(self):
-        """the default decompressor"""
-        if self._docket is None:
-            return None
-        t = self._docket.default_compression_header
-        c = self._get_decompressor(t)
-        return c.decompress
-
-    def _indexfp(self):
-        """file object for the revlog's index file"""
-        return self.opener(self._indexfile, mode=b"r")
-
-    def __index_write_fp(self):
-        # You should not use this directly and use `_writing` instead
-        try:
-            f = self.opener(
-                self._indexfile, mode=b"r+", checkambig=self._checkambig
-            )
-            if self._docket is None:
-                f.seek(0, os.SEEK_END)
-            else:
-                f.seek(self._docket.index_end, os.SEEK_SET)
-            return f
-        except FileNotFoundError:
-            return self.opener(
-                self._indexfile, mode=b"w+", checkambig=self._checkambig
-            )
-
-    def __index_new_fp(self):
-        # You should not use this unless you are upgrading from inline revlog
-        return self.opener(
-            self._indexfile,
-            mode=b"w",
-            checkambig=self._checkambig,
-            atomictemp=True,
-        )
-
     def _datafp(self, mode=b'r'):
         """file object for the revlog's data file"""
         return self.opener(self._datafile, mode=mode)
 
-    @contextlib.contextmanager
-    def _sidedatareadfp(self):
-        """file object suitable to read sidedata"""
-        if self._writinghandles:
-            yield self._writinghandles[2]
-        else:
-            with self.opener(self._sidedatafile) as fp:
-                yield fp
-
     def tiprev(self):
         return len(self.index) - 1
 
@@ -881,7 +1969,7 @@
         except KeyError:
             return False
 
-    def candelta(self, baserev, rev):
+    def _candelta(self, baserev, rev):
         """whether two revisions (baserev, rev) can be delta-ed or not"""
         # Disable delta if either rev requires a content-changing flag
         # processor (ex. LFS). This is because such flag processor can alter
@@ -895,6 +1983,10 @@
         return True
 
     def update_caches(self, transaction):
+        """update on disk cache
+
+        If a transaction is passed, the update may be delayed to transaction
+        commit."""
         if self._nodemap_file is not None:
             if transaction is None:
                 nodemaputil.update_persistent_nodemap(self)
@@ -902,10 +1994,9 @@
                 nodemaputil.setup_persistent_nodemap(transaction, self)
 
     def clearcaches(self):
-        self._revisioncache = None
+        """Clear in-memory caches"""
         self._chainbasecache.clear()
-        self._segmentfile.clear_cache()
-        self._segmentfile_sidedata.clear_cache()
+        self._inner.clear_cache()
         self._pcache = {}
         self._nodemap_docket = None
         self.index.clearcaches()
@@ -914,7 +2005,7 @@
         use_nodemap = (
             not self._inline
             and self._nodemap_file is not None
-            and util.safehasattr(self.index, 'update_nodemap_data')
+            and hasattr(self.index, 'update_nodemap_data')
         )
         if use_nodemap:
             nodemap_data = nodemaputil.persisted_data(self)
@@ -923,6 +2014,7 @@
                 self.index.update_nodemap_data(*nodemap_data)
 
     def rev(self, node):
+        """return the revision number associated with a <nodeid>"""
         try:
             return self.index.rev(node)
         except TypeError:
@@ -967,7 +2059,7 @@
         return self.index[rev][1]
 
     def sidedata_length(self, rev):
-        if not self.hassidedata:
+        if not self.feature_config.has_side_data:
             return 0
         return self.index[rev][9]
 
@@ -1034,7 +2126,7 @@
                 raise error.WdirUnsupported
             raise
 
-        if self.canonical_parent_order and entry[5] == nullrev:
+        if self.feature_config.canonical_parent_order and entry[5] == nullrev:
             return entry[6], entry[5]
         else:
             return entry[5], entry[6]
@@ -1059,7 +2151,7 @@
         i = self.index
         d = i[self.rev(node)]
         # inline node() to avoid function call overhead
-        if self.canonical_parent_order and d[5] == self.nullid:
+        if self.feature_config.canonical_parent_order and d[5] == self.nullid:
             return i[d[6]][7], i[d[5]][7]
         else:
             return i[d[5]][7], i[d[6]][7]
@@ -1072,7 +2164,7 @@
         if rev in chaininfocache:
             return chaininfocache[rev]
         index = self.index
-        generaldelta = self._generaldelta
+        generaldelta = self.delta_config.general_delta
         iterrev = rev
         e = index[iterrev]
         clen = 0
@@ -1099,45 +2191,7 @@
         return r
 
     def _deltachain(self, rev, stoprev=None):
-        """Obtain the delta chain for a revision.
-
-        ``stoprev`` specifies a revision to stop at. If not specified, we
-        stop at the base of the chain.
-
-        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
-        revs in ascending order and ``stopped`` is a bool indicating whether
-        ``stoprev`` was hit.
-        """
-        # Try C implementation.
-        try:
-            return self.index.deltachain(rev, stoprev, self._generaldelta)
-        except AttributeError:
-            pass
-
-        chain = []
-
-        # Alias to prevent attribute lookup in tight loop.
-        index = self.index
-        generaldelta = self._generaldelta
-
-        iterrev = rev
-        e = index[iterrev]
-        while iterrev != e[3] and iterrev != stoprev:
-            chain.append(iterrev)
-            if generaldelta:
-                iterrev = e[3]
-            else:
-                iterrev -= 1
-            e = index[iterrev]
-
-        if iterrev == stoprev:
-            stopped = True
-        else:
-            chain.append(iterrev)
-            stopped = False
-
-        chain.reverse()
-        return chain, stopped
+        return self._inner._deltachain(rev, stoprev=stoprev)
 
     def ancestors(self, revs, stoprev=0, inclusive=False):
         """Generate the ancestors of 'revs' in reverse revision order.
@@ -1752,177 +2806,27 @@
         p1, p2 = self.parents(node)
         return storageutil.hashrevisionsha1(text, p1, p2) != node
 
-    def _getsegmentforrevs(self, startrev, endrev, df=None):
-        """Obtain a segment of raw data corresponding to a range of revisions.
-
-        Accepts the start and end revisions and an optional already-open
-        file handle to be used for reading. If the file handle is read, its
-        seek position will not be preserved.
-
-        Requests for data may be satisfied by a cache.
-
-        Returns a 2-tuple of (offset, data) for the requested range of
-        revisions. Offset is the integer offset from the beginning of the
-        revlog and data is a str or buffer of the raw byte data.
-
-        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
-        to determine where each revision's data begins and ends.
-        """
-        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
-        # (functions are expensive).
-        index = self.index
-        istart = index[startrev]
-        start = int(istart[0] >> 16)
-        if startrev == endrev:
-            end = start + istart[1]
-        else:
-            iend = index[endrev]
-            end = int(iend[0] >> 16) + iend[1]
-
-        if self._inline:
-            start += (startrev + 1) * self.index.entry_size
-            end += (endrev + 1) * self.index.entry_size
-        length = end - start
-
-        return start, self._segmentfile.read_chunk(start, length, df)
-
-    def _chunk(self, rev, df=None):
-        """Obtain a single decompressed chunk for a revision.
-
-        Accepts an integer revision and an optional already-open file handle
-        to be used for reading. If used, the seek position of the file will not
-        be preserved.
-
-        Returns a str holding uncompressed data for the requested revision.
-        """
-        compression_mode = self.index[rev][10]
-        data = self._getsegmentforrevs(rev, rev, df=df)[1]
-        if compression_mode == COMP_MODE_PLAIN:
-            return data
-        elif compression_mode == COMP_MODE_DEFAULT:
-            return self._decompressor(data)
-        elif compression_mode == COMP_MODE_INLINE:
-            return self.decompress(data)
-        else:
-            msg = b'unknown compression mode %d'
-            msg %= compression_mode
-            raise error.RevlogError(msg)
-
-    def _chunks(self, revs, df=None, targetsize=None):
-        """Obtain decompressed chunks for the specified revisions.
-
-        Accepts an iterable of numeric revisions that are assumed to be in
-        ascending order. Also accepts an optional already-open file handle
-        to be used for reading. If used, the seek position of the file will
-        not be preserved.
-
-        This function is similar to calling ``self._chunk()`` multiple times,
-        but is faster.
-
-        Returns a list with decompressed data for each requested revision.
-        """
-        if not revs:
-            return []
-        start = self.start
-        length = self.length
-        inline = self._inline
-        iosize = self.index.entry_size
-        buffer = util.buffer
-
-        l = []
-        ladd = l.append
-
-        if not self._withsparseread:
-            slicedchunks = (revs,)
-        else:
-            slicedchunks = deltautil.slicechunk(
-                self, revs, targetsize=targetsize
-            )
-
-        for revschunk in slicedchunks:
-            firstrev = revschunk[0]
-            # Skip trailing revisions with empty diff
-            for lastrev in revschunk[::-1]:
-                if length(lastrev) != 0:
-                    break
-
-            try:
-                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
-            except OverflowError:
-                # issue4215 - we can't cache a run of chunks greater than
-                # 2G on Windows
-                return [self._chunk(rev, df=df) for rev in revschunk]
-
-            decomp = self.decompress
-            # self._decompressor might be None, but will not be used in that case
-            def_decomp = self._decompressor
-            for rev in revschunk:
-                chunkstart = start(rev)
-                if inline:
-                    chunkstart += (rev + 1) * iosize
-                chunklength = length(rev)
-                comp_mode = self.index[rev][10]
-                c = buffer(data, chunkstart - offset, chunklength)
-                if comp_mode == COMP_MODE_PLAIN:
-                    ladd(c)
-                elif comp_mode == COMP_MODE_INLINE:
-                    ladd(decomp(c))
-                elif comp_mode == COMP_MODE_DEFAULT:
-                    ladd(def_decomp(c))
-                else:
-                    msg = b'unknown compression mode %d'
-                    msg %= comp_mode
-                    raise error.RevlogError(msg)
-
-        return l
-
     def deltaparent(self, rev):
         """return deltaparent of the given revision"""
         base = self.index[rev][3]
         if base == rev:
             return nullrev
-        elif self._generaldelta:
+        elif self.delta_config.general_delta:
             return base
         else:
             return rev - 1
 
     def issnapshot(self, rev):
         """tells whether rev is a snapshot"""
-        if not self._sparserevlog:
-            return self.deltaparent(rev) == nullrev
-        elif util.safehasattr(self.index, 'issnapshot'):
-            # directly assign the method to cache the testing and access
-            self.issnapshot = self.index.issnapshot
-            return self.issnapshot(rev)
-        if rev == nullrev:
-            return True
-        entry = self.index[rev]
-        base = entry[3]
-        if base == rev:
-            return True
-        if base == nullrev:
-            return True
-        p1 = entry[5]
-        while self.length(p1) == 0:
-            b = self.deltaparent(p1)
-            if b == p1:
-                break
-            p1 = b
-        p2 = entry[6]
-        while self.length(p2) == 0:
-            b = self.deltaparent(p2)
-            if b == p2:
-                break
-            p2 = b
-        if base == p1 or base == p2:
-            return False
-        return self.issnapshot(base)
+        ret = self._inner.issnapshot(rev)
+        self.issnapshot = self._inner.issnapshot
+        return ret
 
     def snapshotdepth(self, rev):
         """number of snapshot in the chain before this one"""
         if not self.issnapshot(rev):
             raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
-        return len(self._deltachain(rev)[0]) - 1
+        return len(self._inner._deltachain(rev)[0]) - 1
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions
@@ -1931,19 +2835,17 @@
         revlog data directly. So this function needs raw revision data.
         """
         if rev1 != nullrev and self.deltaparent(rev2) == rev1:
-            return bytes(self._chunk(rev2))
+            return bytes(self._inner._chunk(rev2))
 
         return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
 
-    def revision(self, nodeorrev, _df=None):
+    def revision(self, nodeorrev):
         """return an uncompressed revision of a given node or revision
         number.
-
-        _df - an existing file handle to read from. (internal-only)
         """
-        return self._revisiondata(nodeorrev, _df)
-
-    def sidedata(self, nodeorrev, _df=None):
+        return self._revisiondata(nodeorrev)
+
+    def sidedata(self, nodeorrev):
         """a map of extra data related to the changeset but not part of the hash
 
         This function currently return a dictionary. However, more advanced
@@ -1957,7 +2859,23 @@
             rev = self.rev(nodeorrev)
         return self._sidedata(rev)
 
-    def _revisiondata(self, nodeorrev, _df=None, raw=False):
+    def _rawtext(self, node, rev):
+        """return the possibly unvalidated rawtext for a revision
+
+        returns (rev, rawtext, validated)
+        """
+        # Check if we have the entry in cache
+        # The cache entry looks like (node, rev, rawtext)
+        if self._inner._revisioncache:
+            if self._inner._revisioncache[0] == node:
+                return (rev, self._inner._revisioncache[2], True)
+
+        if rev is None:
+            rev = self.rev(node)
+
+        return self._inner.raw_text(node, rev)
+
+    def _revisiondata(self, nodeorrev, raw=False):
         # deal with <nodeorrev> argument type
         if isinstance(nodeorrev, int):
             rev = nodeorrev
@@ -1972,7 +2890,7 @@
 
         # ``rawtext`` is the text as stored inside the revlog. Might be the
         # revision or might need to be processed to retrieve the revision.
-        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
+        rev, rawtext, validated = self._rawtext(node, rev)
 
         if raw and validated:
             # if we don't want to process the raw text and that raw
@@ -1997,97 +2915,20 @@
         if validatehash:
             self.checkhash(text, node, rev=rev)
         if not validated:
-            self._revisioncache = (node, rev, rawtext)
+            self._inner._revisioncache = (node, rev, rawtext)
 
         return text
 
-    def _rawtext(self, node, rev, _df=None):
-        """return the possibly unvalidated rawtext for a revision
-
-        returns (rev, rawtext, validated)
-        """
-
-        # revision in the cache (could be useful to apply delta)
-        cachedrev = None
-        # An intermediate text to apply deltas to
-        basetext = None
-
-        # Check if we have the entry in cache
-        # The cache entry looks like (node, rev, rawtext)
-        if self._revisioncache:
-            if self._revisioncache[0] == node:
-                return (rev, self._revisioncache[2], True)
-            cachedrev = self._revisioncache[1]
-
-        if rev is None:
-            rev = self.rev(node)
-
-        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
-        if stopped:
-            basetext = self._revisioncache[2]
-
-        # drop cache to save memory, the caller is expected to
-        # update self._revisioncache after validating the text
-        self._revisioncache = None
-
-        targetsize = None
-        rawsize = self.index[rev][2]
-        if 0 <= rawsize:
-            targetsize = 4 * rawsize
-
-        bins = self._chunks(chain, df=_df, targetsize=targetsize)
-        if basetext is None:
-            basetext = bytes(bins[0])
-            bins = bins[1:]
-
-        rawtext = mdiff.patches(basetext, bins)
-        del basetext  # let us have a chance to free memory early
-        return (rev, rawtext, False)
-
     def _sidedata(self, rev):
         """Return the sidedata for a given revision number."""
-        index_entry = self.index[rev]
-        sidedata_offset = index_entry[8]
-        sidedata_size = index_entry[9]
-
-        if self._inline:
-            sidedata_offset += self.index.entry_size * (1 + rev)
-        if sidedata_size == 0:
-            return {}
-
-        if self._docket.sidedata_end < sidedata_offset + sidedata_size:
-            filename = self._sidedatafile
-            end = self._docket.sidedata_end
-            offset = sidedata_offset
-            length = sidedata_size
-            m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
-            raise error.RevlogError(m)
-
-        comp_segment = self._segmentfile_sidedata.read_chunk(
-            sidedata_offset, sidedata_size
-        )
-
-        comp = self.index[rev][11]
-        if comp == COMP_MODE_PLAIN:
-            segment = comp_segment
-        elif comp == COMP_MODE_DEFAULT:
-            segment = self._decompressor(comp_segment)
-        elif comp == COMP_MODE_INLINE:
-            segment = self.decompress(comp_segment)
-        else:
-            msg = b'unknown compression mode %d'
-            msg %= comp
-            raise error.RevlogError(msg)
-
-        sidedata = sidedatautil.deserialize_sidedata(segment)
-        return sidedata
-
-    def rawdata(self, nodeorrev, _df=None):
-        """return an uncompressed raw data of a given node or revision number.
-
-        _df - an existing file handle to read from. (internal-only)
-        """
-        return self._revisiondata(nodeorrev, _df, raw=True)
+        sidedata_end = None
+        if self._docket is not None:
+            sidedata_end = self._docket.sidedata_end
+        return self._inner.sidedata(rev, sidedata_end)
+
+    def rawdata(self, nodeorrev):
+        """return an uncompressed raw data of a given node or revision number."""
+        return self._revisiondata(nodeorrev, raw=True)
 
     def hash(self, text, p1, p2):
         """Compute a node hash.
@@ -2113,8 +2954,11 @@
                 # revision data is accessed. But this case should be rare and
                 # it is extra work to teach the cache about the hash
                 # verification state.
-                if self._revisioncache and self._revisioncache[0] == node:
-                    self._revisioncache = None
+                if (
+                    self._inner._revisioncache
+                    and self._inner._revisioncache[0] == node
+                ):
+                    self._inner._revisioncache = None
 
                 revornode = rev
                 if revornode is None:
@@ -2124,7 +2968,9 @@
                     % (self.display_id, pycompat.bytestr(revornode))
                 )
         except error.RevlogError:
-            if self._censorable and storageutil.iscensoredtext(text):
+            if self.feature_config.censorable and storageutil.iscensoredtext(
+                text
+            ):
                 raise error.CensoredNodeError(self.display_id, node, text)
             raise
 
@@ -2159,35 +3005,28 @@
         if not self._inline or total_size < _maxinline:
             return
 
-        troffset = tr.findoffset(self._indexfile)
+        if self._docket is not None:
+            msg = b"inline revlog should not have a docket"
+            raise error.ProgrammingError(msg)
+
+        troffset = tr.findoffset(self._inner.canonical_index_file)
         if troffset is None:
             raise error.RevlogError(
                 _(b"%s not found in the transaction") % self._indexfile
             )
         if troffset:
-            tr.addbackup(self._indexfile, for_offset=True)
+            tr.addbackup(self._inner.canonical_index_file, for_offset=True)
         tr.add(self._datafile, 0)
 
-        existing_handles = False
-        if self._writinghandles is not None:
-            existing_handles = True
-            fp = self._writinghandles[0]
-            fp.flush()
-            fp.close()
-            # We can't use the cached file handle after close(). So prevent
-            # its usage.
-            self._writinghandles = None
-            self._segmentfile.writing_handle = None
-            # No need to deal with sidedata writing handle as it is only
-            # relevant with revlog-v2 which is never inline, not reaching
-            # this code
+        new_index_file_path = None
         if side_write:
             old_index_file_path = self._indexfile
             new_index_file_path = self._split_index_file
             opener = self.opener
             weak_self = weakref.ref(self)
 
-            # the "split" index replace the real index when the transaction is finalized
+            # the "split" index replace the real index when the transaction is
+            # finalized
             def finalize_callback(tr):
                 opener.rename(
                     new_index_file_path,
@@ -2197,11 +3036,14 @@
                 maybe_self = weak_self()
                 if maybe_self is not None:
                     maybe_self._indexfile = old_index_file_path
+                    maybe_self._inner.index_file = maybe_self._indexfile
 
             def abort_callback(tr):
                 maybe_self = weak_self()
                 if maybe_self is not None:
                     maybe_self._indexfile = old_index_file_path
+                    maybe_self._inner.inline = True
+                    maybe_self._inner.index_file = old_index_file_path
 
             tr.registertmp(new_index_file_path)
             if self.target[1] is not None:
@@ -2211,61 +3053,26 @@
             tr.addfinalize(callback_id, finalize_callback)
             tr.addabort(callback_id, abort_callback)
 
-        new_dfh = self._datafp(b'w+')
-        new_dfh.truncate(0)  # drop any potentially existing data
-        try:
-            with self._indexfp() as read_ifh:
-                for r in self:
-                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
-                new_dfh.flush()
-
-            if side_write:
-                self._indexfile = new_index_file_path
-            with self.__index_new_fp() as fp:
-                self._format_flags &= ~FLAG_INLINE_DATA
-                self._inline = False
-                for i in self:
-                    e = self.index.entry_binary(i)
-                    if i == 0 and self._docket is None:
-                        header = self._format_flags | self._format_version
-                        header = self.index.pack_header(header)
-                        e = header + e
-                    fp.write(e)
-                if self._docket is not None:
-                    self._docket.index_end = fp.tell()
-
-                # If we don't use side-write, the temp file replace the real
-                # index when we exit the context manager
-
-            nodemaputil.setup_persistent_nodemap(tr, self)
-            self._segmentfile = randomaccessfile.randomaccessfile(
-                self.opener,
-                self._datafile,
-                self._chunkcachesize,
-            )
-
-            if existing_handles:
-                # switched from inline to conventional reopen the index
-                ifh = self.__index_write_fp()
-                self._writinghandles = (ifh, new_dfh, None)
-                self._segmentfile.writing_handle = new_dfh
-                new_dfh = None
-                # No need to deal with sidedata writing handle as it is only
-                # relevant with revlog-v2 which is never inline, not reaching
-                # this code
-        finally:
-            if new_dfh is not None:
-                new_dfh.close()
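+        # the actual split (rewriting the data outside of the index file) is
+        # delegated to the inner revlog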
+        self._format_flags &= ~FLAG_INLINE_DATA
+        self._inner.split_inline(
+            tr,
+            self._format_flags | self._format_version,
+            new_index_file_path=new_index_file_path,
+        )
+
+        self._inline = False
+        if new_index_file_path is not None:
+            self._indexfile = new_index_file_path
+
+        nodemaputil.setup_persistent_nodemap(tr, self)
 
     def _nodeduplicatecallback(self, transaction, node):
         """called when trying to add a node already stored."""
 
     @contextlib.contextmanager
     def reading(self):
-        """Context manager that keeps data and sidedata files open for reading"""
-        with self._segmentfile.reading():
-            with self._segmentfile_sidedata.reading():
-                yield
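+        """Context manager that keeps data and sidedata files open for reading"""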
+        with self._inner.reading():
+            yield
 
     @contextlib.contextmanager
     def _writing(self, transaction):
@@ -2273,65 +3080,26 @@
             msg = b'try to write in a `trypending` revlog: %s'
             msg %= self.display_id
             raise error.ProgrammingError(msg)
-        if self._writinghandles is not None:
+        if self._inner.is_writing:
             yield
         else:
-            ifh = dfh = sdfh = None
-            try:
-                r = len(self)
-                # opening the data file.
-                dsize = 0
-                if r:
-                    dsize = self.end(r - 1)
-                dfh = None
-                if not self._inline:
-                    try:
-                        dfh = self._datafp(b"r+")
-                        if self._docket is None:
-                            dfh.seek(0, os.SEEK_END)
-                        else:
-                            dfh.seek(self._docket.data_end, os.SEEK_SET)
-                    except FileNotFoundError:
-                        dfh = self._datafp(b"w+")
-                    transaction.add(self._datafile, dsize)
-                if self._sidedatafile is not None:
-                    # revlog-v2 does not inline, help Pytype
-                    assert dfh is not None
-                    try:
-                        sdfh = self.opener(self._sidedatafile, mode=b"r+")
-                        dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
-                    except FileNotFoundError:
-                        sdfh = self.opener(self._sidedatafile, mode=b"w+")
-                    transaction.add(
-                        self._sidedatafile, self._docket.sidedata_end
-                    )
-
-                # opening the index file.
-                isize = r * self.index.entry_size
-                ifh = self.__index_write_fp()
-                if self._inline:
-                    transaction.add(self._indexfile, dsize + isize)
-                else:
-                    transaction.add(self._indexfile, isize)
-                # exposing all file handle for writing.
-                self._writinghandles = (ifh, dfh, sdfh)
-                self._segmentfile.writing_handle = ifh if self._inline else dfh
-                self._segmentfile_sidedata.writing_handle = sdfh
+            data_end = None
+            sidedata_end = None
+            if self._docket is not None:
+                data_end = self._docket.data_end
+                sidedata_end = self._docket.sidedata_end
+            with self._inner.writing(
+                transaction,
+                data_end=data_end,
+                sidedata_end=sidedata_end,
+            ):
                 yield
                 if self._docket is not None:
                     self._write_docket(transaction)
-            finally:
-                self._writinghandles = None
-                self._segmentfile.writing_handle = None
-                self._segmentfile_sidedata.writing_handle = None
-                if dfh is not None:
-                    dfh.close()
-                if sdfh is not None:
-                    sdfh.close()
-                # closing the index file last to avoid exposing referent to
-                # potential unflushed data content.
-                if ifh is not None:
-                    ifh.close()
+
+    @property
+    def is_delaying(self):
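+        """True when writes to this revlog are currently being delayed
+        (delegated to the inner revlog)"""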
+        return self._inner.is_delaying
 
     def _write_docket(self, transaction):
         """write the current docket on disk
@@ -2376,7 +3144,7 @@
 
         if sidedata is None:
             sidedata = {}
-        elif sidedata and not self.hassidedata:
+        elif sidedata and not self.feature_config.has_side_data:
             raise error.ProgrammingError(
                 _(b"trying to add sidedata to a revlog who don't support them")
             )
@@ -2452,69 +3220,10 @@
             )
 
     def compress(self, data):
-        """Generate a possibly-compressed representation of data."""
-        if not data:
-            return b'', data
-
-        compressed = self._compressor.compress(data)
-
-        if compressed:
-            # The revlog compressor added the header in the returned data.
-            return b'', compressed
-
-        if data[0:1] == b'\0':
-            return b'', data
-        return b'u', data
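+        """Generate a possibly-compressed representation of data."""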
+        return self._inner.compress(data)
 
     def decompress(self, data):
-        """Decompress a revlog chunk.
-
-        The chunk is expected to begin with a header identifying the
-        format type so it can be routed to an appropriate decompressor.
-        """
-        if not data:
-            return data
-
-        # Revlogs are read much more frequently than they are written and many
-        # chunks only take microseconds to decompress, so performance is
-        # important here.
-        #
-        # We can make a few assumptions about revlogs:
-        #
-        # 1) the majority of chunks will be compressed (as opposed to inline
-        #    raw data).
-        # 2) decompressing *any* data will likely by at least 10x slower than
-        #    returning raw inline data.
-        # 3) we want to prioritize common and officially supported compression
-        #    engines
-        #
-        # It follows that we want to optimize for "decompress compressed data
-        # when encoded with common and officially supported compression engines"
-        # case over "raw data" and "data encoded by less common or non-official
-        # compression engines." That is why we have the inline lookup first
-        # followed by the compengines lookup.
-        #
-        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
-        # compressed chunks. And this matters for changelog and manifest reads.
-        t = data[0:1]
-
-        if t == b'x':
-            try:
-                return _zlibdecompress(data)
-            except zlib.error as e:
-                raise error.RevlogError(
-                    _(b'revlog decompress error: %s')
-                    % stringutil.forcebytestr(e)
-                )
-        # '\0' is more common than 'u' so it goes first.
-        elif t == b'\0':
-            return data
-        elif t == b'u':
-            return util.buffer(data, 1)
-
-        compressor = self._get_decompressor(t)
-
-        return compressor.decompress(data)
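+        """Decompress a revlog chunk.
+
+        The chunk is expected to begin with a header identifying the
+        format type so it can be routed to an appropriate decompressor.
+        """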
+        return self._inner.decompress(data)
 
     def _addrevision(
         self,
@@ -2554,15 +3263,10 @@
             raise error.RevlogError(
                 _(b"%s: attempt to add wdir revision") % self.display_id
             )
-        if self._writinghandles is None:
+        if self._inner._writinghandles is None:
             msg = b'adding revision outside `revlog._writing` context'
             raise error.ProgrammingError(msg)
 
-        if self._inline:
-            fh = self._writinghandles[0]
-        else:
-            fh = self._writinghandles[1]
-
         btext = [rawtext]
 
         curr = len(self)
@@ -2571,7 +3275,7 @@
         offset = self._get_data_offset(prev)
 
         if self._concurrencychecker:
-            ifh, dfh, sdfh = self._writinghandles
+            ifh, dfh, sdfh = self._inner._writinghandles
             # XXX no checking for the sidedata file
             if self._inline:
                 # offset is "as if" it were in the .d file, so we need to add on
@@ -2602,7 +3306,7 @@
 
         if deltacomputer is None:
             write_debug = None
-            if self._debug_delta:
+            if self.delta_config.debug_delta:
                 write_debug = transaction._report
             deltacomputer = deltautil.deltacomputer(
                 self, write_debug=write_debug
@@ -2612,7 +3316,10 @@
             # If the cached delta has no information about how it should be
             # reused, add the default reuse instruction according to the
             # revlog's configuration.
-            if self._generaldelta and self._lazydeltabase:
+            if (
+                self.delta_config.general_delta
+                and self.delta_config.lazy_delta_base
+            ):
                 delta_base_reuse = DELTA_BASE_REUSE_TRY
             else:
                 delta_base_reuse = DELTA_BASE_REUSE_NO
@@ -2628,7 +3335,7 @@
             flags,
         )
 
-        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
+        deltainfo = deltacomputer.finddeltainfo(revinfo)
 
         compression_mode = COMP_MODE_INLINE
         if self._docket is not None:
@@ -2637,11 +3344,11 @@
             compression_mode, deltainfo = r
 
         sidedata_compression_mode = COMP_MODE_INLINE
-        if sidedata and self.hassidedata:
+        if sidedata and self.feature_config.has_side_data:
             sidedata_compression_mode = COMP_MODE_PLAIN
             serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
             sidedata_offset = self._docket.sidedata_end
-            h, comp_sidedata = self.compress(serialized_sidedata)
+            h, comp_sidedata = self._inner.compress(serialized_sidedata)
             if (
                 h != b'u'
                 and comp_sidedata[0:1] != b'\0'
@@ -2665,7 +3372,7 @@
             sidedata_offset = 0
 
         rank = RANK_UNKNOWN
-        if self._compute_rank:
+        if self.feature_config.compute_rank:
             if (p1r, p2r) == (nullrev, nullrev):
                 rank = 1
             elif p1r != nullrev and p2r == nullrev:
@@ -2716,10 +3423,10 @@
         rawtext = btext[0]
 
         if alwayscache and rawtext is None:
-            rawtext = deltacomputer.buildtext(revinfo, fh)
+            rawtext = deltacomputer.buildtext(revinfo)
 
         if type(rawtext) == bytes:  # only accept immutable objects
-            self._revisioncache = (node, curr, rawtext)
+            self._inner._revisioncache = (node, curr, rawtext)
         self._chainbasecache[curr] = deltainfo.chainbase
         return curr
 
@@ -2738,7 +3445,14 @@
             return self._docket.data_end
 
     def _writeentry(
-        self, transaction, entry, data, link, offset, sidedata, sidedata_offset
+        self,
+        transaction,
+        entry,
+        data,
+        link,
+        offset,
+        sidedata,
+        sidedata_offset,
     ):
         # Files opened in a+ mode have inconsistent behavior on various
         # platforms. Windows requires that a file positioning call be made
@@ -2752,53 +3466,29 @@
         # Note: This is likely not necessary on Python 3. However, because
         # the file handle is reused for reads and may be seeked there, we need
         # to be careful before changing this.
-        if self._writinghandles is None:
-            msg = b'adding revision outside `revlog._writing` context'
-            raise error.ProgrammingError(msg)
-        ifh, dfh, sdfh = self._writinghandles
-        if self._docket is None:
-            ifh.seek(0, os.SEEK_END)
-        else:
-            ifh.seek(self._docket.index_end, os.SEEK_SET)
-        if dfh:
-            if self._docket is None:
-                dfh.seek(0, os.SEEK_END)
-            else:
-                dfh.seek(self._docket.data_end, os.SEEK_SET)
-        if sdfh:
-            sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
-
-        curr = len(self) - 1
-        if not self._inline:
-            transaction.add(self._datafile, offset)
-            if self._sidedatafile:
-                transaction.add(self._sidedatafile, sidedata_offset)
-            transaction.add(self._indexfile, curr * len(entry))
-            if data[0]:
-                dfh.write(data[0])
-            dfh.write(data[1])
-            if sidedata:
-                sdfh.write(sidedata)
-            ifh.write(entry)
-        else:
-            offset += curr * self.index.entry_size
-            transaction.add(self._indexfile, offset)
-            ifh.write(entry)
-            ifh.write(data[0])
-            ifh.write(data[1])
-            assert not sidedata
-            self._enforceinlinesize(transaction)
+        index_end = data_end = sidedata_end = None
         if self._docket is not None:
-            # revlog-v2 always has 3 writing handles, help Pytype
-            wh1 = self._writinghandles[0]
-            wh2 = self._writinghandles[1]
-            wh3 = self._writinghandles[2]
-            assert wh1 is not None
-            assert wh2 is not None
-            assert wh3 is not None
-            self._docket.index_end = wh1.tell()
-            self._docket.data_end = wh2.tell()
-            self._docket.sidedata_end = wh3.tell()
+            index_end = self._docket.index_end
+            data_end = self._docket.data_end
+            sidedata_end = self._docket.sidedata_end
+
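+        # the inner revlog reports the new end offsets of the index, data
+        # and sidedata files once the entry has been written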
+        files_end = self._inner.write_entry(
+            transaction,
+            entry,
+            data,
+            link,
+            offset,
+            sidedata,
+            sidedata_offset,
+            index_end,
+            data_end,
+            sidedata_end,
+        )
+        self._enforceinlinesize(transaction)
+        if self._docket is not None:
+            self._docket.index_end = files_end[0]
+            self._docket.data_end = files_end[1]
+            self._docket.sidedata_end = files_end[2]
 
         nodemaputil.setup_persistent_nodemap(transaction, self)
 
@@ -2830,7 +3520,10 @@
         # read the default delta-base reuse policy from revlog config if the
         # group did not specify one.
         if delta_base_reuse_policy is None:
-            if self._generaldelta and self._lazydeltabase:
+            if (
+                self.delta_config.general_delta
+                and self.delta_config.lazy_delta_base
+            ):
                 delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
             else:
                 delta_base_reuse_policy = DELTA_BASE_REUSE_NO
@@ -2840,7 +3533,7 @@
         try:
             with self._writing(transaction):
                 write_debug = None
-                if self._debug_delta:
+                if self.delta_config.debug_delta:
                     write_debug = transaction._report
                 deltacomputer = deltautil.deltacomputer(
                     self,
@@ -2930,14 +3623,14 @@
 
     def iscensored(self, rev):
         """Check if a file revision is censored."""
-        if not self._censorable:
+        if not self.feature_config.censorable:
             return False
 
         return self.flags(rev) & REVIDX_ISCENSORED
 
     def _peek_iscensored(self, baserev, delta):
         """Quickly check if a delta produces a censored revision."""
-        if not self._censorable:
+        if not self.feature_config.censorable:
             return False
 
         return storageutil.deltaiscensored(delta, baserev, self.rawsize)
@@ -2999,10 +3692,8 @@
             self._docket.write(transaction, stripping=True)
 
         # then reset internal state in memory to forget those revisions
-        self._revisioncache = None
         self._chaininfocache = util.lrucachedict(500)
-        self._segmentfile.clear_cache()
-        self._segmentfile_sidedata.clear_cache()
+        self._inner.clear_cache()
 
         del self.index[rev:-1]
 
@@ -3047,6 +3738,7 @@
         return (dd, di)
 
     def files(self):
+        """return list of files that compose this revlog"""
         res = [self._indexfile]
         if self._docket_file is None:
             if not self._inline:
@@ -3077,7 +3769,7 @@
                 b'unhandled value for nodesorder: %s' % nodesorder
             )
 
-        if nodesorder is None and not self._generaldelta:
+        if nodesorder is None and not self.delta_config.general_delta:
             nodesorder = b'storage'
 
         if (
@@ -3092,7 +3784,7 @@
             nodesorder,
             revlogrevisiondelta,
             deltaparentfn=self.deltaparent,
-            candeltafn=self.candelta,
+            candeltafn=self._candelta,
             rawsizefn=self.rawsize,
             revdifffn=self.revdiff,
             flagsfn=self.flags,
@@ -3179,22 +3871,24 @@
 
         # lazydelta and lazydeltabase controls whether to reuse a cached delta,
         # if possible.
-        oldlazydelta = destrevlog._lazydelta
-        oldlazydeltabase = destrevlog._lazydeltabase
-        oldamd = destrevlog._deltabothparents
+        old_delta_config = destrevlog.delta_config
+        destrevlog.delta_config = destrevlog.delta_config.copy()
 
         try:
             if deltareuse == self.DELTAREUSEALWAYS:
-                destrevlog._lazydeltabase = True
-                destrevlog._lazydelta = True
+                destrevlog.delta_config.lazy_delta_base = True
+                destrevlog.delta_config.lazy_delta = True
             elif deltareuse == self.DELTAREUSESAMEREVS:
-                destrevlog._lazydeltabase = False
-                destrevlog._lazydelta = True
+                destrevlog.delta_config.lazy_delta_base = False
+                destrevlog.delta_config.lazy_delta = True
             elif deltareuse == self.DELTAREUSENEVER:
-                destrevlog._lazydeltabase = False
-                destrevlog._lazydelta = False
-
-            destrevlog._deltabothparents = forcedeltabothparents or oldamd
+                destrevlog.delta_config.lazy_delta_base = False
+                destrevlog.delta_config.lazy_delta = False
+
+            delta_both_parents = (
+                forcedeltabothparents or old_delta_config.delta_both_parents
+            )
+            destrevlog.delta_config.delta_both_parents = delta_both_parents
 
             with self.reading(), destrevlog._writing(tr):
                 self._clone(
@@ -3207,9 +3901,7 @@
                 )
 
         finally:
-            destrevlog._lazydelta = oldlazydelta
-            destrevlog._lazydeltabase = oldlazydeltabase
-            destrevlog._deltabothparents = oldamd
+            destrevlog.delta_config = old_delta_config
 
     def _clone(
         self,
@@ -3222,7 +3914,7 @@
     ):
         """perform the core duty of `revlog.clone` after parameter processing"""
         write_debug = None
-        if self._debug_delta:
+        if self.delta_config.debug_delta:
             write_debug = tr._report
         deltacomputer = deltautil.deltacomputer(
             destrevlog,
@@ -3267,10 +3959,10 @@
                     sidedata=sidedata,
                 )
             else:
-                if destrevlog._lazydelta:
+                if destrevlog.delta_config.lazy_delta:
                     dp = self.deltaparent(rev)
                     if dp != nullrev:
-                        cachedelta = (dp, bytes(self._chunk(rev)))
+                        cachedelta = (dp, bytes(self._inner._chunk(rev)))
 
                 sidedata = None
                 if not cachedelta:
@@ -3453,7 +4145,7 @@
         return d
 
     def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
-        if not self.hassidedata:
+        if not self.feature_config.has_side_data:
             return
         # revlog formats with sidedata support does not support inline
         assert not self._inline
@@ -3464,7 +4156,7 @@
         new_entries = []
         # append the new sidedata
         with self._writing(transaction):
-            ifh, dfh, sdfh = self._writinghandles
+            ifh, dfh, sdfh = self._inner._writinghandles
             dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
 
             current_offset = sdfh.tell()
@@ -3482,9 +4174,9 @@
                 )
 
                 sidedata_compression_mode = COMP_MODE_INLINE
-                if serialized_sidedata and self.hassidedata:
+                if serialized_sidedata and self.feature_config.has_side_data:
                     sidedata_compression_mode = COMP_MODE_PLAIN
-                    h, comp_sidedata = self.compress(serialized_sidedata)
+                    h, comp_sidedata = self._inner.compress(serialized_sidedata)
                     if (
                         h != b'u'
                         and comp_sidedata[0] != b'\0'
--- a/mercurial/revlogutils/debug.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revlogutils/debug.py	Tue Nov 07 15:21:11 2023 +0100
@@ -13,7 +13,6 @@
     mdiff,
     node as nodemod,
     revlogutils,
-    util,
 )
 
 from . import (
@@ -347,83 +346,86 @@
             l[1] = size
         l[2] += size
 
-    numrevs = len(r)
-    for rev in range(numrevs):
-        p1, p2 = r.parentrevs(rev)
-        delta = r.deltaparent(rev)
-        if format > 0:
-            s = r.rawsize(rev)
-            full_text_total_size += s
-            addsize(s, datasize)
-        if p2 != nodemod.nullrev:
-            nummerges += 1
-        size = r.length(rev)
-        if delta == nodemod.nullrev:
-            chainlengths.append(0)
-            chainbases.append(r.start(rev))
-            chainspans.append(size)
-            if size == 0:
-                numempty += 1
-                numemptytext += 1
+    with r.reading():
+        numrevs = len(r)
+        for rev in range(numrevs):
+            p1, p2 = r.parentrevs(rev)
+            delta = r.deltaparent(rev)
+            if format > 0:
+                s = r.rawsize(rev)
+                full_text_total_size += s
+                addsize(s, datasize)
+            if p2 != nodemod.nullrev:
+                nummerges += 1
+            size = r.length(rev)
+            if delta == nodemod.nullrev:
+                chainlengths.append(0)
+                chainbases.append(r.start(rev))
+                chainspans.append(size)
+                if size == 0:
+                    numempty += 1
+                    numemptytext += 1
+                else:
+                    numfull += 1
+                    numsnapdepth[0] += 1
+                    addsize(size, fullsize)
+                    addsize(size, snapsizedepth[0])
             else:
-                numfull += 1
-                numsnapdepth[0] += 1
-                addsize(size, fullsize)
-                addsize(size, snapsizedepth[0])
-        else:
-            nad = (
-                delta != p1 and delta != p2 and not r.isancestorrev(delta, rev)
-            )
-            chainlengths.append(chainlengths[delta] + 1)
-            baseaddr = chainbases[delta]
-            revaddr = r.start(rev)
-            chainbases.append(baseaddr)
-            chainspans.append((revaddr - baseaddr) + size)
-            if size == 0:
-                numempty += 1
-                numemptydelta += 1
-            elif r.issnapshot(rev):
-                addsize(size, semisize)
-                numsemi += 1
-                depth = r.snapshotdepth(rev)
-                numsnapdepth[depth] += 1
-                if nad:
-                    numsnapdepth_nad[depth] += 1
-                addsize(size, snapsizedepth[depth])
+                nad = (
+                    delta != p1
+                    and delta != p2
+                    and not r.isancestorrev(delta, rev)
+                )
+                chainlengths.append(chainlengths[delta] + 1)
+                baseaddr = chainbases[delta]
+                revaddr = r.start(rev)
+                chainbases.append(baseaddr)
+                chainspans.append((revaddr - baseaddr) + size)
+                if size == 0:
+                    numempty += 1
+                    numemptydelta += 1
+                elif r.issnapshot(rev):
+                    addsize(size, semisize)
+                    numsemi += 1
+                    depth = r.snapshotdepth(rev)
+                    numsnapdepth[depth] += 1
+                    if nad:
+                        numsnapdepth_nad[depth] += 1
+                    addsize(size, snapsizedepth[depth])
+                else:
+                    addsize(size, deltasize)
+                    if delta == rev - 1:
+                        numprev += 1
+                        if delta == p1:
+                            nump1prev += 1
+                        elif delta == p2:
+                            nump2prev += 1
+                        elif nad:
+                            numprev_nad += 1
+                    elif delta == p1:
+                        nump1 += 1
+                    elif delta == p2:
+                        nump2 += 1
+                    elif delta != nodemod.nullrev:
+                        numother += 1
+                        numother_nad += 1
+
+            # Obtain data on the raw chunks in the revlog.
+            if hasattr(r, '_inner'):
+                segment = r._inner.get_segment_for_revs(rev, rev)[1]
             else:
-                addsize(size, deltasize)
-                if delta == rev - 1:
-                    numprev += 1
-                    if delta == p1:
-                        nump1prev += 1
-                    elif delta == p2:
-                        nump2prev += 1
-                    elif nad:
-                        numprev_nad += 1
-                elif delta == p1:
-                    nump1 += 1
-                elif delta == p2:
-                    nump2 += 1
-                elif delta != nodemod.nullrev:
-                    numother += 1
-                    numother_nad += 1
+                segment = r._revlog._getsegmentforrevs(rev, rev)[1]
+            if segment:
+                chunktype = bytes(segment[0:1])
+            else:
+                chunktype = b'empty'
 
-        # Obtain data on the raw chunks in the revlog.
-        if util.safehasattr(r, '_getsegmentforrevs'):
-            segment = r._getsegmentforrevs(rev, rev)[1]
-        else:
-            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
-        if segment:
-            chunktype = bytes(segment[0:1])
-        else:
-            chunktype = b'empty'
+            if chunktype not in chunktypecounts:
+                chunktypecounts[chunktype] = 0
+                chunktypesizes[chunktype] = 0
 
-        if chunktype not in chunktypecounts:
-            chunktypecounts[chunktype] = 0
-            chunktypesizes[chunktype] = 0
-
-        chunktypecounts[chunktype] += 1
-        chunktypesizes[chunktype] += size
+            chunktypecounts[chunktype] += 1
+            chunktypesizes[chunktype] += size
 
     # Adjust size min value for empty cases
     for size in (datasize, fullsize, semisize, deltasize):
@@ -708,3 +710,234 @@
         fm.write(b'revlog.target', b' %s', revlog_target)
 
         fm.plain(b'\n')
+
+
+class DeltaChainAuditor:
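+    """gather information about the delta chains of revisions in a revlog"""
+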
+    def __init__(self, revlog):
+        self._revlog = revlog
+        self._index = self._revlog.index
+        self._generaldelta = revlog.delta_config.general_delta
+        self._chain_size_cache = {}
+        # safety check to avoid crashing on corrupted revlogs
+        self._total_revs = len(self._index)
+
+    def revinfo(self, rev, size_info=True, dist_info=True, sparse_info=True):
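+        """return a dict of information about the delta chain of `rev`
+
+        The dict always contains the keys 'p1', 'p2', 'compressed_size',
+        'uncompressed_size', 'deltatype' and 'chain'; 'chain_size' is
+        included when any of the *_info flags is set.
+        """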
+        e = self._index[rev]
+        compsize = e[constants.ENTRY_DATA_COMPRESSED_LENGTH]
+        uncompsize = e[constants.ENTRY_DATA_UNCOMPRESSED_LENGTH]
+
+        base = e[constants.ENTRY_DELTA_BASE]
+        p1 = e[constants.ENTRY_PARENT_1]
+        p2 = e[constants.ENTRY_PARENT_2]
+
+        # If a parent of a revision has an empty delta, we never delta
+        # against that parent, but directly against the delta base of that
+        # parent (recursively). This avoids adding a useless entry to the
+        # chain.
+        #
+        # However, we need to detect that case specially for the delta-type,
+        # so that it is not simply reported as "other".
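+        #
+        # For instance, if the entry for p1 is empty and p1's own delta base
+        # is some revision b, the interesting comparison point for `rev` is b
+        # (we then end up with p1_base == b) rather than p1 itself.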
+        p1_base = p1
+        if p1 != nodemod.nullrev and p1 < self._total_revs:
+            e1 = self._index[p1]
+            while e1[constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
+                new_base = e1[constants.ENTRY_DELTA_BASE]
+                if (
+                    new_base == p1_base
+                    or new_base == nodemod.nullrev
+                    or new_base >= self._total_revs
+                ):
+                    break
+                p1_base = new_base
+                e1 = self._index[p1_base]
+        p2_base = p2
+        if p2 != nodemod.nullrev and p2 < self._total_revs:
+            e2 = self._index[p2]
+            while e2[constants.ENTRY_DATA_COMPRESSED_LENGTH] == 0:
+                new_base = e2[constants.ENTRY_DELTA_BASE]
+                if (
+                    new_base == p2_base
+                    or new_base == nodemod.nullrev
+                    or new_base >= self._total_revs
+                ):
+                    break
+                p2_base = new_base
+                e2 = self._index[p2_base]
+
+        if self._generaldelta:
+            if base == p1:
+                deltatype = b'p1'
+            elif base == p2:
+                deltatype = b'p2'
+            elif base == rev:
+                deltatype = b'base'
+            elif base == p1_base:
+                deltatype = b'skip1'
+            elif base == p2_base:
+                deltatype = b'skip2'
+            elif self._revlog.issnapshot(rev):
+                deltatype = b'snap'
+            elif base == rev - 1:
+                deltatype = b'prev'
+            else:
+                deltatype = b'other'
+        else:
+            if base == rev:
+                deltatype = b'base'
+            else:
+                deltatype = b'prev'
+
+        chain = self._revlog._deltachain(rev)[0]
+
+        data = {
+            'p1': p1,
+            'p2': p2,
+            'compressed_size': compsize,
+            'uncompressed_size': uncompsize,
+            'deltatype': deltatype,
+            'chain': chain,
+        }
+
+        if size_info or dist_info or sparse_info:
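+            # Walk the chain from its end: as soon as we reach a revision
+            # whose full chain size is already cached, reuse that value
+            # instead of walking all the way back to the chain base.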
+            chain_size = 0
+            for iter_rev in reversed(chain):
+                cached = self._chain_size_cache.get(iter_rev)
+                if cached is not None:
+                    chain_size += cached
+                    break
+                e = self._index[iter_rev]
+                chain_size += e[constants.ENTRY_DATA_COMPRESSED_LENGTH]
+            self._chain_size_cache[rev] = chain_size
+            data['chain_size'] = chain_size
+
+        return data
+
+
+def debug_delta_chain(
+    revlog,
+    revs=None,
+    size_info=True,
+    dist_info=True,
+    sparse_info=True,
+):
+    auditor = DeltaChainAuditor(revlog)
+    r = revlog
+    start = r.start
+    length = r.length
+    withsparseread = revlog.data_config.with_sparse_read
+
+    header = (
+        b'    rev'
+        b'      p1'
+        b'      p2'
+        b'  chain#'
+        b' chainlen'
+        b'     prev'
+        b'   delta'
+    )
+    if size_info:
+        header += b'       size' b'    rawsize' b'  chainsize' b'     ratio'
+    if dist_info:
+        header += b'   lindist' b' extradist' b' extraratio'
+    if withsparseread and sparse_info:
+        header += b'   readsize' b' largestblk' b' rddensity' b' srchunks'
+    header += b'\n'
+    yield header
+
+    if revs is None:
+        all_revs = iter(r)
+    else:
+        revlog_size = len(r)
+        all_revs = sorted(rev for rev in revs if rev < revlog_size)
+
+    chainbases = {}
+    for rev in all_revs:
+        info = auditor.revinfo(
+            rev,
+            size_info=size_info,
+            dist_info=dist_info,
+            sparse_info=sparse_info,
+        )
+        comp = info['compressed_size']
+        uncomp = info['uncompressed_size']
+        chain = info['chain']
+        chainbase = chain[0]
+        chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
+        if dist_info:
+            basestart = start(chainbase)
+            revstart = start(rev)
+            lineardist = revstart + comp - basestart
+            extradist = lineardist - info['chain_size']
+        try:
+            prevrev = chain[-2]
+        except IndexError:
+            prevrev = -1
+
+        if size_info:
+            chainsize = info['chain_size']
+            if uncomp != 0:
+                chainratio = float(chainsize) / float(uncomp)
+            else:
+                chainratio = chainsize
+
+        if dist_info:
+            if chainsize != 0:
+                extraratio = float(extradist) / float(chainsize)
+            else:
+                extraratio = extradist
+
+        # label, display-format, data-key, value
+        entry = [
+            (b'rev', b'%7d', 'rev', rev),
+            (b'p1', b'%7d', 'p1', info['p1']),
+            (b'p2', b'%7d', 'p2', info['p2']),
+            (b'chainid', b'%7d', 'chainid', chainid),
+            (b'chainlen', b'%8d', 'chainlen', len(chain)),
+            (b'prevrev', b'%8d', 'prevrev', prevrev),
+            (b'deltatype', b'%7s', 'deltatype', info['deltatype']),
+        ]
+        if size_info:
+            entry.extend(
+                [
+                    (b'compsize', b'%10d', 'compsize', comp),
+                    (b'uncompsize', b'%10d', 'uncompsize', uncomp),
+                    (b'chainsize', b'%10d', 'chainsize', chainsize),
+                    (b'chainratio', b'%9.5f', 'chainratio', chainratio),
+                ]
+            )
+        if dist_info:
+            entry.extend(
+                [
+                    (b'lindist', b'%9d', 'lindist', lineardist),
+                    (b'extradist', b'%9d', 'extradist', extradist),
+                    (b'extraratio', b'%10.5f', 'extraratio', extraratio),
+                ]
+            )
+        if withsparseread and sparse_info:
+            chainsize = info['chain_size']
+            readsize = 0
+            largestblock = 0
+            srchunks = 0
+
+            for revschunk in deltautil.slicechunk(r, chain):
+                srchunks += 1
+                blkend = start(revschunk[-1]) + length(revschunk[-1])
+                blksize = blkend - start(revschunk[0])
+
+                readsize += blksize
+                if largestblock < blksize:
+                    largestblock = blksize
+
+            if readsize:
+                readdensity = float(chainsize) / float(readsize)
+            else:
+                readdensity = 1
+            entry.extend(
+                [
+                    (b'readsize', b'%10d', 'readsize', readsize),
+                    (b'largestblock', b'%10d', 'largestblock', largestblock),
+                    (b'readdensity', b'%9.5f', 'readdensity', readdensity),
+                    (b'srchunks', b'%8d', 'srchunks', srchunks),
+                ]
+            )
+        yield entry
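
A minimal sketch of how a caller might consume this generator (the `revlog`
and `ui` names here are assumptions, not part of the patch): the first item
yielded is a preformatted header line; every following item is a list of
(label, display-format, data-key, value) tuples.

```python
lines = debug_delta_chain(revlog)
ui.write(next(lines))  # the preformatted header line
for entry in lines:
    # render each column with its declared display format
    ui.write(b''.join(fmt % value for _label, fmt, _key, value in entry))
    ui.write(b'\n')
```
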
--- a/mercurial/revlogutils/deltas.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revlogutils/deltas.py	Tue Nov 07 15:21:11 2023 +0100
@@ -14,7 +14,6 @@
 # import stuff from node for others to import from revlog
 from ..node import nullrev
 from ..i18n import _
-from ..pycompat import getattr
 
 from .constants import (
     COMP_MODE_DEFAULT,
@@ -48,9 +47,14 @@
 
     def __init__(self, data, density=0.5, mingap=0, snapshot=()):
         """data is an list of revision payload boundaries"""
+        from .. import revlog
+
         self._data = data
-        self._srdensitythreshold = density
-        self._srmingapsize = mingap
+        self.data_config = revlog.DataConfig()
+        self.data_config.sr_density_threshold = density
+        self.data_config.sr_min_gap_size = mingap
+        self.delta_config = revlog.DeltaConfig()
+        self.feature_config = revlog.FeatureConfig()
         self._snapshot = set(snapshot)
         self.index = None
 
@@ -85,12 +89,12 @@
     Assume that revs are sorted.
 
     The initial chunk is sliced until the overall density (payload/chunks-span
-    ratio) is above `revlog._srdensitythreshold`. No gap smaller than
-    `revlog._srmingapsize` is skipped.
+    ratio) is above `revlog.data_config.sr_density_threshold`. No gap smaller
+    than `revlog.data_config.sr_min_gap_size` is skipped.
 
     If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
     For consistency with other slicing choices, this limit won't go lower than
-    `revlog._srmingapsize`.
+    `revlog.data_config.sr_min_gap_size`.
 
     If individual revision chunks are larger than this limit, they will still
     be yielded individually.
@@ -139,14 +143,16 @@
     [[-1], [13], [15]]
     """
     if targetsize is not None:
-        targetsize = max(targetsize, revlog._srmingapsize)
+        targetsize = max(targetsize, revlog.data_config.sr_min_gap_size)
     # targetsize should not be specified when evaluating delta candidates:
     # * targetsize is used to ensure we stay within specification when reading,
     densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
     if densityslicing is None:
         densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
     for chunk in densityslicing(
-        revs, revlog._srdensitythreshold, revlog._srmingapsize
+        revs,
+        revlog.data_config.sr_density_threshold,
+        revlog.data_config.sr_min_gap_size,
     ):
         for subchunk in _slicechunktosize(revlog, chunk, targetsize):
             yield subchunk
@@ -517,7 +523,7 @@
     return end - revlog.start(revs[0])
 
 
-def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
+def _textfromdelta(revlog, baserev, delta, p1, p2, flags, expectednode):
     """build full text from a (base, delta) pair and other metadata"""
     # special case deltas which replace entire base; no need to decode
     # base revision. this neatly avoids censored bases, which throw when
@@ -530,7 +536,7 @@
     else:
         # deltabase is rawtext before changed by flag processors, which is
         # equivalent to non-raw text
-        basetext = revlog.revision(baserev, _df=fh)
+        basetext = revlog.revision(baserev)
         fulltext = mdiff.patch(basetext, delta)
 
     try:
@@ -591,7 +597,7 @@
     assert (
         revinfo.cachedelta is None
         or revinfo.cachedelta[2] != DELTA_BASE_REUSE_FORCE
-        or not revlog._generaldelta
+        or not revlog.delta_config.general_delta
     )
 
     # - 'deltainfo.distance' is the distance from the base revision --
@@ -602,7 +608,7 @@
 
     textlen = revinfo.textlen
     defaultmax = textlen * 4
-    maxdist = revlog._maxdeltachainspan
+    maxdist = revlog.delta_config.max_deltachain_span
     if not maxdist:
         maxdist = deltainfo.distance  # ensure the conditional pass
     maxdist = max(maxdist, defaultmax)
@@ -616,7 +622,7 @@
     #   possible to build pathological revlog where delta pattern would lead
     #   to too many reads. However, they do not happen in practice at all. So
     #   we skip the span check entirely.
-    if not revlog._sparserevlog and maxdist < deltainfo.distance:
+    if not revlog.delta_config.sparse_revlog and maxdist < deltainfo.distance:
         return False
 
     # Bad delta from new delta size:
@@ -635,7 +641,10 @@
     # Bad delta from chain length:
     #
     #   If the number of delta in the chain gets too high.
-    if revlog._maxchainlen and revlog._maxchainlen < deltainfo.chainlen:
+    if (
+        revlog.delta_config.max_chain_len
+        and revlog.delta_config.max_chain_len < deltainfo.chainlen
+    ):
         return False
 
     # bad delta from intermediate snapshot size limit
@@ -689,7 +698,7 @@
     if target_rev is None:
         target_rev = len(revlog)
 
-    if not revlog._generaldelta:
+    if not revlog.delta_config.general_delta:
         # before general delta, there is only one possible delta base
         yield (target_rev - 1,)
         yield None
@@ -701,16 +710,16 @@
     assert (
         cachedelta is None
         or cachedelta[2] != DELTA_BASE_REUSE_FORCE
-        or not revlog._generaldelta
+        or not revlog.delta_config.general_delta
     )
 
     deltalength = revlog.length
     deltaparent = revlog.deltaparent
-    sparse = revlog._sparserevlog
+    sparse = revlog.delta_config.sparse_revlog
     good = None
 
     deltas_limit = textlen * LIMIT_DELTA2TEXT
-    group_chunk_size = revlog._candidate_group_chunk_size
+    group_chunk_size = revlog.delta_config.candidate_group_chunk_size
 
     tested = {nullrev}
     candidates = _refinedgroups(
@@ -765,15 +774,18 @@
             # here too.
             chainlen, chainsize = revlog._chaininfo(rev)
             # if chain will be too long, skip base
-            if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
+            if (
+                revlog.delta_config.max_chain_len
+                and chainlen >= revlog.delta_config.max_chain_len
+            ):
                 tested.add(rev)
                 continue
             # if chain already have too much data, skip base
             if deltas_limit < chainsize:
                 tested.add(rev)
                 continue
-            if sparse and revlog.upperboundcomp is not None:
-                maxcomp = revlog.upperboundcomp
+            if sparse and revlog.delta_config.upper_bound_comp is not None:
+                maxcomp = revlog.delta_config.upper_bound_comp
                 basenotsnap = (p1, p2, nullrev)
                 if rev not in basenotsnap and revlog.issnapshot(rev):
                     snapshotdepth = revlog.snapshotdepth(rev)
@@ -863,7 +875,7 @@
             break
 
     # If sparse revlog is enabled, we can try to refine the available deltas
-    if not revlog._sparserevlog:
+    if not revlog.delta_config.sparse_revlog:
         yield None
         return
 
@@ -902,9 +914,9 @@
     The group order aims at providing fast or small candidates first.
     """
     # Why search for delta base if we cannot use a delta base ?
-    assert revlog._generaldelta
+    assert revlog.delta_config.general_delta
     # also see issue6056
-    sparse = revlog._sparserevlog
+    sparse = revlog.delta_config.sparse_revlog
     curr = len(revlog)
     prev = curr - 1
     deltachain = lambda rev: revlog._deltachain(rev)[0]
@@ -912,7 +924,7 @@
     # exclude already lazy tested base if any
     parents = [p for p in (p1, p2) if p != nullrev]
 
-    if not revlog._deltabothparents and len(parents) == 2:
+    if not revlog.delta_config.delta_both_parents and len(parents) == 2:
         parents.sort()
         # To minimize the chance of having to build a fulltext,
         # pick first whichever parent is closest to us (max rev)
@@ -1060,7 +1072,7 @@
             end_rev < self._start_rev or end_rev > self._end_rev
         ), (self._start_rev, self._end_rev, start_rev, end_rev)
         cache = self.snapshots
-        if util.safehasattr(revlog.index, 'findsnapshots'):
+        if hasattr(revlog.index, 'findsnapshots'):
             revlog.index.findsnapshots(cache, start_rev, end_rev)
         else:
             deltaparent = revlog.deltaparent
@@ -1091,12 +1103,10 @@
     def _gather_debug(self):
         return self._write_debug is not None or self._debug_info is not None
 
-    def buildtext(self, revinfo, fh):
+    def buildtext(self, revinfo):
         """Builds a fulltext version of a revision
 
         revinfo: revisioninfo instance that contains all needed info
-        fh:      file handle to either the .i or the .d revlog file,
-                 depending on whether it is inlined or not
         """
         btext = revinfo.btext
         if btext[0] is not None:
@@ -1108,7 +1118,6 @@
         delta = cachedelta[1]
 
         fulltext = btext[0] = _textfromdelta(
-            fh,
             revlog,
             baserev,
             delta,
@@ -1119,25 +1128,25 @@
         )
         return fulltext
 
-    def _builddeltadiff(self, base, revinfo, fh):
+    def _builddeltadiff(self, base, revinfo):
         revlog = self.revlog
-        t = self.buildtext(revinfo, fh)
+        t = self.buildtext(revinfo)
         if revlog.iscensored(base):
             # deltas based on a censored revision must replace the
             # full content in one patch, so delta works everywhere
             header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
             delta = header + t
         else:
-            ptext = revlog.rawdata(base, _df=fh)
+            ptext = revlog.rawdata(base)
             delta = mdiff.textdiff(ptext, t)
 
         return delta
 
-    def _builddeltainfo(self, revinfo, base, fh, target_rev=None):
+    def _builddeltainfo(self, revinfo, base, target_rev=None):
         # can we use the cached delta?
         revlog = self.revlog
         chainbase = revlog.chainbase(base)
-        if revlog._generaldelta:
+        if revlog.delta_config.general_delta:
             deltabase = base
         else:
             if target_rev is not None and base != target_rev - 1:
@@ -1149,9 +1158,9 @@
                 raise error.ProgrammingError(msg)
             deltabase = chainbase
         snapshotdepth = None
-        if revlog._sparserevlog and deltabase == nullrev:
+        if revlog.delta_config.sparse_revlog and deltabase == nullrev:
             snapshotdepth = 0
-        elif revlog._sparserevlog and revlog.issnapshot(deltabase):
+        elif revlog.delta_config.sparse_revlog and revlog.issnapshot(deltabase):
             # A delta chain should always be one full snapshot,
             # zero or more semi-snapshots, and zero or more deltas
             p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
@@ -1168,17 +1177,19 @@
                 and self.revlog.length(currentbase) == 0
             ):
                 currentbase = self.revlog.deltaparent(currentbase)
-            if self.revlog._lazydelta and currentbase == base:
+            if self.revlog.delta_config.lazy_delta and currentbase == base:
                 delta = revinfo.cachedelta[1]
         if delta is None:
-            delta = self._builddeltadiff(base, revinfo, fh)
+            delta = self._builddeltadiff(base, revinfo)
         if self._debug_search:
             msg = b"DBG-DELTAS-SEARCH:     uncompressed-delta-size=%d\n"
             msg %= len(delta)
             self._write_debug(msg)
         # snapshotdepth needs to be neither None nor a 0-level snapshot
-        if revlog.upperboundcomp is not None and snapshotdepth:
-            lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
+        if revlog.delta_config.upper_bound_comp is not None and snapshotdepth:
+            lowestrealisticdeltalen = (
+                len(delta) // revlog.delta_config.upper_bound_comp
+            )
             snapshotlimit = revinfo.textlen >> snapshotdepth
             if self._debug_search:
                 msg = b"DBG-DELTAS-SEARCH:     projected-lower-size=%d\n"
@@ -1194,7 +1205,7 @@
                     msg = b"DBG-DELTAS-SEARCH:     DISCARDED (prev size)\n"
                     self._write_debug(msg)
                 return None
-        header, data = revlog.compress(delta)
+        header, data = revlog._inner.compress(delta)
         deltalen = len(header) + len(data)
         offset = revlog.end(len(revlog) - 1)
         dist = deltalen + offset - revlog.start(chainbase)
@@ -1213,9 +1224,9 @@
             snapshotdepth,
         )
 
-    def _fullsnapshotinfo(self, fh, revinfo, curr):
-        rawtext = self.buildtext(revinfo, fh)
-        data = self.revlog.compress(rawtext)
+    def _fullsnapshotinfo(self, revinfo, curr):
+        rawtext = self.buildtext(revinfo)
+        data = self.revlog._inner.compress(rawtext)
         compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
         deltabase = chainbase = curr
         snapshotdepth = 0
@@ -1232,12 +1243,10 @@
             snapshotdepth,
         )
 
-    def finddeltainfo(self, revinfo, fh, excluded_bases=None, target_rev=None):
+    def finddeltainfo(self, revinfo, excluded_bases=None, target_rev=None):
         """Find an acceptable delta against a candidate revision
 
         revinfo: information about the revision (instance of _revisioninfo)
-        fh:      file handle to either the .i or the .d revlog file,
-                 depending on whether it is inlined or not
 
         Returns the first acceptable candidate revision, as ordered by
         _candidategroups
@@ -1297,7 +1306,7 @@
         # not calling candelta since only one revision needs test, also to
         # avoid overhead fetching flags again.
         if not revinfo.textlen or revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
-            deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
+            deltainfo = self._fullsnapshotinfo(revinfo, target_rev)
             if gather_debug:
                 end = util.timer()
                 dbg['duration'] = end - start
@@ -1316,14 +1325,14 @@
 
         # If this source delta is to be forcibly reused, let us comply early.
         if (
-            revlog._generaldelta
+            revlog.delta_config.general_delta
             and revinfo.cachedelta is not None
             and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE
         ):
             base = revinfo.cachedelta[0]
             if base == nullrev:
                 dbg_type = b"full"
-                deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
+                deltainfo = self._fullsnapshotinfo(revinfo, target_rev)
                 if gather_debug:
                     snapshotdepth = 0
             elif base not in excluded_bases:
@@ -1475,7 +1484,6 @@
                 candidatedelta = self._builddeltainfo(
                     revinfo,
                     candidaterev,
-                    fh,
                     target_rev=target_rev,
                 )
                 if self._debug_search:
@@ -1506,7 +1514,7 @@
 
         if deltainfo is None:
             dbg_type = b"full"
-            deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
+            deltainfo = self._fullsnapshotinfo(revinfo, target_rev)
         elif deltainfo.snapshotdepth:  # pytype: disable=attribute-error
             dbg_type = b"snapshot"
         else:
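
For reference, the attribute migration this patch performs throughout
`deltas.py`, as reconstructed from the hunks above (a summary, not an
exhaustive list):

```python
revlog.delta_config.general_delta        # was revlog._generaldelta
revlog.delta_config.sparse_revlog        # was revlog._sparserevlog
revlog.delta_config.max_chain_len        # was revlog._maxchainlen
revlog.delta_config.max_deltachain_span  # was revlog._maxdeltachainspan
revlog.delta_config.delta_both_parents   # was revlog._deltabothparents
revlog.delta_config.lazy_delta           # was revlog._lazydelta
revlog.delta_config.upper_bound_comp     # was revlog.upperboundcomp
revlog.data_config.sr_density_threshold  # was revlog._srdensitythreshold
revlog.data_config.sr_min_gap_size       # was revlog._srmingapsize
```
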
--- a/mercurial/revlogutils/docket.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revlogutils/docket.py	Tue Nov 07 15:21:11 2023 +0100
@@ -330,7 +330,9 @@
     rl_version = version_header & 0xFFFF
     if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
         return None
-    comp = util.compengines[revlog._compengine].revlogheader()
+    comp = util.compengines[
+        revlog.feature_config.compression_engine
+    ].revlogheader()
     docket = RevlogDocket(
         revlog,
         version_header=version_header,
--- a/mercurial/revlogutils/nodemap.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revlogutils/nodemap.py	Tue Nov 07 15:21:11 2023 +0100
@@ -174,9 +174,9 @@
             msg = "calling persist nodemap on a revlog without the feature enabled"
             raise error.ProgrammingError(msg)
 
-    can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
+    can_incremental = hasattr(revlog.index, "nodemap_data_incremental")
     ondisk_docket = revlog._nodemap_docket
-    feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
+    feed_data = hasattr(revlog.index, "update_nodemap_data")
     use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
 
     data = None
@@ -216,7 +216,7 @@
         # otherwise fallback to a full new export
         target_docket = NodeMapDocket()
         datafile = _rawdata_filepath(revlog, target_docket)
-        if util.safehasattr(revlog.index, "nodemap_data_all"):
+        if hasattr(revlog.index, "nodemap_data_all"):
             data = revlog.index.nodemap_data_all()
         else:
             data = persistent_data(revlog.index)
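
The `util.safehasattr()` to `hasattr()` conversions here (and throughout this
patch) are safe because the Python 2 pitfall that motivated the wrapper is
gone: on Python 3, `hasattr()` only swallows `AttributeError` rather than
masking arbitrary exceptions. A minimal illustration, not part of the patch:

```python
class Demo:
    @property
    def attr(self):
        raise RuntimeError('boom')

# Python 2's hasattr() would have swallowed the RuntimeError and returned
# False; Python 3 lets it propagate instead of hiding the bug.
try:
    hasattr(Demo(), 'attr')
except RuntimeError:
    print('hasattr() no longer hides unrelated exceptions')
```
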
--- a/mercurial/revlogutils/randomaccessfile.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revlogutils/randomaccessfile.py	Tue Nov 07 15:21:11 2023 +0100
@@ -23,6 +23,76 @@
     return (n & (n - 1) == 0) and n != 0
 
 
+class appender:
+    """the changelog index must be updated last on disk, so we use this class
+    to delay writes to it"""
+
+    def __init__(self, vfs, name, mode, buf):
+        self.data = buf
+        fp = vfs(name, mode)
+        self.fp = fp
+        self.offset = fp.tell()
+        self.size = vfs.fstat(fp).st_size
+        self._end = self.size
+
+    def end(self):
+        return self._end
+
+    def tell(self):
+        return self.offset
+
+    def flush(self):
+        pass
+
+    @property
+    def closed(self):
+        return self.fp.closed
+
+    def close(self):
+        self.fp.close()
+
+    def seek(self, offset, whence=0):
+        '''virtual file offset spans real file and data'''
+        if whence == 0:
+            self.offset = offset
+        elif whence == 1:
+            self.offset += offset
+        elif whence == 2:
+            self.offset = self.end() + offset
+        if self.offset < self.size:
+            self.fp.seek(self.offset)
+
+    def read(self, count=-1):
+        '''the only trick here is reads that span the real file and buffered data'''
+        ret = b""
+        if self.offset < self.size:
+            s = self.fp.read(count)
+            ret = s
+            self.offset += len(s)
+            if count > 0:
+                count -= len(s)
+        if count != 0:
+            doff = self.offset - self.size
+            self.data.insert(0, b"".join(self.data))
+            del self.data[1:]
+            s = self.data[0][doff : doff + count]
+            self.offset += len(s)
+            ret += s
+        return ret
+
+    def write(self, s):
+        self.data.append(bytes(s))
+        self.offset += len(s)
+        self._end += len(s)
+
+    def __enter__(self):
+        self.fp.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        return self.fp.__exit__(*args)
+
+
 class randomaccessfile:
     """Accessing arbitrary chuncks of data within a file, with some caching"""
 
@@ -46,26 +116,38 @@
         if initial_cache:
             self._cached_chunk_position, self._cached_chunk = initial_cache
 
+        self._delay_buffer = None
+
     def clear_cache(self):
         self._cached_chunk = b''
         self._cached_chunk_position = 0
 
+    @property
+    def is_open(self):
+        """True if any file handle is being held
+
+        Used for asserts and debugging in the Python code"""
+        return (
+            self.reading_handle is not None or self.writing_handle is not None
+        )
+
     def _open(self, mode=b'r'):
         """Return a file object"""
-        return self.opener(self.filename, mode=mode)
+        if self._delay_buffer is None:
+            return self.opener(self.filename, mode=mode)
+        else:
+            return appender(
+                self.opener, self.filename, mode, self._delay_buffer
+            )
 
     @contextlib.contextmanager
-    def _open_read(self, existing_file_obj=None):
+    def _read_handle(self):
         """File object suitable for reading data"""
-        # Use explicit file handle, if given.
-        if existing_file_obj is not None:
-            yield existing_file_obj
-
         # Use a file handle being actively used for writes, if available.
         # There is some danger to doing this because reads will seek the
         # file. However, revlog._writeentry performs a SEEK_END before all
         # writes, so we should be safe.
-        elif self.writing_handle:
+        if self.writing_handle:
             yield self.writing_handle
 
         elif self.reading_handle:
@@ -93,7 +175,7 @@
         else:
             yield
 
-    def read_chunk(self, offset, length, existing_file_obj=None):
+    def read_chunk(self, offset, length):
         """Read a chunk of bytes from the file.
 
-        Accepts an absolute offset, length to read, and an optional existing
+        Accepts an absolute offset and a length to read.
@@ -116,9 +198,9 @@
             relative_start = offset - cache_start
             return util.buffer(self._cached_chunk, relative_start, length)
 
-        return self._read_and_update_cache(offset, length, existing_file_obj)
+        return self._read_and_update_cache(offset, length)
 
-    def _read_and_update_cache(self, offset, length, existing_file_obj=None):
+    def _read_and_update_cache(self, offset, length):
         # Cache data both forward and backward around the requested
         # data, in a fixed size window. This helps speed up operations
         # involving reading the revlog backwards.
@@ -127,7 +209,7 @@
             (offset + length + self.default_cached_chunk_size)
             & ~(self.default_cached_chunk_size - 1)
         ) - real_offset
-        with self._open_read(existing_file_obj) as file_obj:
+        with self._read_handle() as file_obj:
             file_obj.seek(real_offset)
             data = file_obj.read(real_length)
 
--- a/mercurial/revlogutils/rewrite.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revlogutils/rewrite.py	Tue Nov 07 15:21:11 2023 +0100
@@ -75,7 +75,7 @@
     )
     newrl._format_version = rl._format_version
     newrl._format_flags = rl._format_flags
-    newrl._generaldelta = rl._generaldelta
+    newrl.delta_config.general_delta = rl.delta_config.general_delta
     newrl._parse_index = rl._parse_index
 
     for rev in rl.revs():
@@ -109,7 +109,7 @@
                     b'revision having delta stored'
                 )
                 raise error.Abort(m)
-            rawtext = rl._chunk(rev)
+            rawtext = rl._inner._chunk(rev)
         else:
             rawtext = rl.rawdata(rev)
 
@@ -126,7 +126,8 @@
         rl.opener.rename(newrl._datafile, rl._datafile)
 
     rl.clearcaches()
-    rl._loadindex()
+    chunk_cache = rl._loadindex()
+    rl._load_inner(chunk_cache)
 
 
 def v2_censor(revlog, tr, censornode, tombstone=b''):
@@ -234,7 +235,7 @@
     dc = deltas.deltacomputer(revlog)
     rewritten_entries = {}
     first_excl_rev = min(excluded_revs)
-    with revlog._segmentfile._open_read() as dfh:
+    with revlog.reading():
         for rev in range(first_excl_rev, len(old_index)):
             if rev in excluded_revs:
                 # this revision will be preserved as is, so we don't need to
@@ -250,7 +251,7 @@
                 rewritten_entries[rev] = (nullrev, 0, 0, COMP_MODE_PLAIN)
             else:
 
-                text = revlog.rawdata(rev, _df=dfh)
+                text = revlog.rawdata(rev)
                 info = revlogutils.revisioninfo(
                     node=entry[ENTRY_NODE_ID],
                     p1=revlog.node(entry[ENTRY_PARENT_1]),
@@ -261,7 +262,7 @@
                     flags=entry[ENTRY_DATA_OFFSET] & 0xFFFF,
                 )
                 d = dc.finddeltainfo(
-                    info, dfh, excluded_bases=excluded_revs, target_rev=rev
+                    info, excluded_bases=excluded_revs, target_rev=rev
                 )
                 default_comp = revlog._docket.default_compression_header
                 comp_mode, d = deltas.delta_compression(default_comp, d)
@@ -539,7 +540,7 @@
             util.copyfile(
                 rl.opener.join(index_file),
                 rl.opener.join(new_file_path),
-                checkambig=rl._checkambig,
+                checkambig=rl.data_config.check_ambig,
             )
 
             with rl.opener(new_file_path, mode=b"r+") as fp:
@@ -774,13 +775,7 @@
                 (base_rev, delta),
                 flags,
             )
-            # cached by the global "writing" context
-            assert revlog._writinghandles is not None
-            if revlog._inline:
-                fh = revlog._writinghandles[0]
-            else:
-                fh = revlog._writinghandles[1]
-            return deltacomputer.buildtext(revinfo, fh)
+            return deltacomputer.buildtext(revinfo)
 
         is_affected = _is_revision_affected_fast_inner(
             is_censored,
--- a/mercurial/revset.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revset.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,7 +12,6 @@
 import re
 
 from .i18n import _
-from .pycompat import getattr
 from .node import (
     bin,
     nullrev,
--- a/mercurial/revsetlang.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/revsetlang.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
 import string
 
 from .i18n import _
-from .pycompat import getattr
 from .node import hex
 from . import (
     error,
--- a/mercurial/rewriteutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/rewriteutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -21,7 +21,6 @@
     obsutil,
     revset,
     scmutil,
-    util,
 )
 
 
@@ -77,7 +76,7 @@
         hint = _(b"no changeset checked out")
         raise error.InputError(msg, hint=hint)
 
-    if any(util.safehasattr(r, 'rev') for r in revs):
+    if any(hasattr(r, 'rev') for r in revs):
         repo.ui.develwarn(b"rewriteutil.precheck called with ctx not revs")
         revs = (r.rev() for r in revs)
 
--- a/mercurial/scmposix.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/scmposix.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
     Tuple,
 )
 
-from .pycompat import getattr
 from . import (
     encoding,
     pycompat,
--- a/mercurial/scmutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/scmutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -23,7 +23,6 @@
     short,
     wdirrev,
 )
-from .pycompat import getattr
 from .thirdparty import attr
 from . import (
     copies as copiesmod,
@@ -233,11 +232,7 @@
             reason = encoding.unitolocal(reason)
         ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
     except (IOError, OSError) as inst:
-        if (
-            util.safehasattr(inst, "args")
-            and inst.args
-            and inst.args[0] == errno.EPIPE
-        ):
+        if hasattr(inst, "args") and inst.args and inst.args[0] == errno.EPIPE:
             pass
         elif getattr(inst, "strerror", None):  # common IOError or OSError
             if getattr(inst, "filename", None) is not None:
@@ -561,11 +556,11 @@
             if cache is not None:
                 nodetree = cache.get(b'disambiguationnodetree')
             if not nodetree:
-                if util.safehasattr(parsers, 'nodetree'):
+                if hasattr(parsers, 'nodetree'):
                     # The CExt is the only implementation to provide a nodetree
                     # class so far.
                     index = cl.index
-                    if util.safehasattr(index, 'get_cindex'):
+                    if hasattr(index, 'get_cindex'):
                         # the rust wrapper needs to give access to its internal index
                         index = index.get_cindex()
                     nodetree = parsers.nodetree(index, len(revs))
@@ -1066,7 +1061,7 @@
         return
 
     # translate mapping's other forms
-    if not util.safehasattr(replacements, 'items'):
+    if not hasattr(replacements, 'items'):
         replacements = {(n,): () for n in replacements}
     else:
         # upgrading non tuple "source" to tuple ones for BC
@@ -1692,6 +1687,10 @@
     def __call__(self, func):
         self.func = func
         self.sname = func.__name__
+        # XXX We should be using a unicode string instead of bytes for the
+        # main name (and the _filecache key). The fact that we use bytes is a
+        # remnant of Python 2. Since the name is derived from an attribute
+        # name, a `str` is a better fit now that we only support Python 3.
         self.name = pycompat.sysbytes(self.sname)
         return self
 
--- a/mercurial/shelve.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/shelve.py	Tue Nov 07 15:21:11 2023 +0100
@@ -516,7 +516,7 @@
 
 def getcommitfunc(extra, interactive, editor=False):
     def commitfunc(ui, repo, message, match, opts):
-        hasmq = util.safehasattr(repo, 'mq')
+        hasmq = hasattr(repo, 'mq')
         if hasmq:
             saved, repo.mq.checkapplied = repo.mq.checkapplied, False
 
@@ -1123,11 +1123,9 @@
 
 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
     _restoreactivebookmark(repo, activebookmark)
-    # The transaction aborting will strip all the commits for us,
-    # but it doesn't update the inmemory structures, so addchangegroup
-    # hooks still fire and try to operate on the missing commits.
-    # Clean up manually to prevent this.
-    repo.unfiltered().changelog.strip(oldtiprev, tr)
+    # We used to manually strip the commits to update the in-memory
+    # structures and to prevent some issues around hooks. This no longer
+    # seems necessary, so we simply abort the transaction.
     _aborttransaction(repo, tr)
 
 
--- a/mercurial/smartset.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/smartset.py	Tue Nov 07 15:21:11 2023 +0100
@@ -6,7 +6,6 @@
 # GNU General Public License version 2 or any later version.
 
 
-from .pycompat import getattr
 from . import (
     encoding,
     error,
@@ -137,7 +136,7 @@
 
         This is part of the mandatory API for smartset."""
         # builtins cannot be cached, but they do not need to be
-        if cache and util.safehasattr(condition, '__code__'):
+        if cache and hasattr(condition, '__code__'):
             condition = util.cachefunc(condition)
         return filteredset(self, condition, condrepr)
 
@@ -359,10 +358,10 @@
         return s
 
     def __and__(self, other):
-        return self._fastsetop(other, b'__and__')
+        return self._fastsetop(other, '__and__')
 
     def __sub__(self, other):
-        return self._fastsetop(other, b'__sub__')
+        return self._fastsetop(other, '__sub__')
 
     def _slice(self, start, stop):
         # creating new list should be generally cheaper than iterating items
@@ -1127,13 +1126,16 @@
         This boldly assumes the other contains valid revs only.
         """
         # other is not a smartset, make it so
-        if not util.safehasattr(other, 'isascending'):
+        if not hasattr(other, 'isascending'):
             # filter out hidden revisions
             # (this boldly assumes all smartsets are pure)
             #
             # `other` was used with "&", let's assume this is a set like
             # object.
-            other = baseset(other - self._hiddenrevs)
+            other = baseset(other)
+
+        if self._hiddenrevs:
+            other = other - self._hiddenrevs
 
         other.sort(reverse=self.isdescending())
         return other
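
The behavioural change in this last hunk: hidden revisions are now subtracted
from `other` even when it is already a smartset, whereas previously only the
non-smartset branch filtered them out. A hedged sketch (names assumed):

```python
fs = fullreposet(repo)      # assume revision 6 is hidden in this repo
other = baseset([5, 6, 7])  # already a smartset, so no conversion happens
result = fs & other
# before this patch: 6 could leak into `result`
# after this patch:  fs._hiddenrevs is subtracted in both branches
```
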
--- a/mercurial/sshpeer.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/sshpeer.py	Tue Nov 07 15:21:11 2023 +0100
@@ -10,7 +10,6 @@
 import uuid
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     error,
     pycompat,
@@ -130,7 +129,7 @@
             if sideready:
                 _forwardoutput(self._ui, self._side)
             if mainready:
-                meth = getattr(self._main, methname)
+                meth = getattr(self._main, pycompat.sysstr(methname))
                 if data is None:
                     return meth()
                 else:
--- a/mercurial/sslutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/sslutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,6 @@
 import warnings
 
 from .i18n import _
-from .pycompat import getattr
 from .node import hex
 from . import (
     encoding,
@@ -50,11 +49,11 @@
 # were defined only if compiled against a OpenSSL version with TLS 1.1 / 1.2
 # support. At the mentioned commit, they were unconditionally defined.
 supportedprotocols = set()
-if getattr(ssl, 'HAS_TLSv1', util.safehasattr(ssl, 'PROTOCOL_TLSv1')):
+if getattr(ssl, 'HAS_TLSv1', hasattr(ssl, 'PROTOCOL_TLSv1')):
     supportedprotocols.add(b'tls1.0')
-if getattr(ssl, 'HAS_TLSv1_1', util.safehasattr(ssl, 'PROTOCOL_TLSv1_1')):
+if getattr(ssl, 'HAS_TLSv1_1', hasattr(ssl, 'PROTOCOL_TLSv1_1')):
     supportedprotocols.add(b'tls1.1')
-if getattr(ssl, 'HAS_TLSv1_2', util.safehasattr(ssl, 'PROTOCOL_TLSv1_2')):
+if getattr(ssl, 'HAS_TLSv1_2', hasattr(ssl, 'PROTOCOL_TLSv1_2')):
     supportedprotocols.add(b'tls1.2')
 
 
@@ -312,7 +311,7 @@
     # is loaded and contains that removed CA, you've just undone the user's
     # choice.
 
-    if util.safehasattr(ssl, 'TLSVersion'):
+    if hasattr(ssl, 'TLSVersion'):
         # python 3.7+
         sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
         minimumprotocol = settings[b'minimumprotocol']
@@ -419,7 +418,7 @@
             pass
 
         # Try to print more helpful error messages for known failures.
-        if util.safehasattr(e, 'reason'):
+        if hasattr(e, 'reason'):
             # This error occurs when the client and server don't share a
             # common/supported SSL/TLS protocol. We've disabled SSLv2 and SSLv3
             # outright. Hopefully the reason for this error is that we require
@@ -546,7 +545,7 @@
                 _(b'referenced certificate file (%s) does not exist') % f
             )
 
-    if util.safehasattr(ssl, 'TLSVersion'):
+    if hasattr(ssl, 'TLSVersion'):
         # python 3.7+
         sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
         sslcontext.options |= getattr(ssl, 'OP_NO_COMPRESSION', 0)
@@ -628,7 +627,7 @@
     # Otherwise, use the list of more secure ciphers if found in the ssl module.
     if exactprotocol:
         sslcontext.set_ciphers('DEFAULT:@SECLEVEL=0')
-    elif util.safehasattr(ssl, '_RESTRICTED_SERVER_CIPHERS'):
+    elif hasattr(ssl, '_RESTRICTED_SERVER_CIPHERS'):
         sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0)
         # pytype: disable=module-attr
         sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
--- a/mercurial/statprof.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/statprof.py	Tue Nov 07 15:21:11 2023 +0100
@@ -167,7 +167,7 @@
         # a float
         if frequency:
             self.sample_interval = 1.0 / frequency
-        elif not pycompat.hasattr(self, 'sample_interval'):
+        elif not hasattr(self, 'sample_interval'):
             # default to 1000 Hz
             self.sample_interval = 1.0 / 1000.0
         else:
--- a/mercurial/store.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/store.py	Tue Nov 07 15:21:11 2023 +0100
@@ -13,7 +13,6 @@
 from typing import Generator, List
 
 from .i18n import _
-from .pycompat import getattr
 from .thirdparty import attr
 from .node import hex
 from .revlogutils.constants import (
--- a/mercurial/streamclone.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/streamclone.py	Tue Nov 07 15:21:11 2023 +0100
@@ -428,7 +428,7 @@
             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                 for i in range(filecount):
                     # XXX doesn't support '\n' or '\r' in filenames
-                    if util.safehasattr(fp, 'readline'):
+                    if hasattr(fp, 'readline'):
                         l = fp.readline()
                     else:
                         # inline clonebundles use a chunkbuffer, so no readline
--- a/mercurial/strip.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/strip.py	Tue Nov 07 15:21:11 2023 +0100
@@ -1,5 +1,4 @@
 from .i18n import _
-from .pycompat import getattr
 from . import (
     bookmarks as bookmarksmod,
     cmdutil,
@@ -12,7 +11,6 @@
     registrar,
     repair,
     scmutil,
-    util,
 )
 
 release = lockmod.release
@@ -36,7 +34,7 @@
     currentbranch = repo[None].branch()
 
     if (
-        util.safehasattr(repo, 'mq')
+        hasattr(repo, 'mq')
         and p2 != repo.nullid
         and p2 in [x.node for x in repo.mq.applied]
     ):
--- a/mercurial/subrepo.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/subrepo.py	Tue Nov 07 15:21:11 2023 +0100
@@ -1136,7 +1136,7 @@
             # --non-interactive.
             if commands[0] in (b'update', b'checkout', b'commit'):
                 cmd.append(b'--non-interactive')
-        if util.safehasattr(subprocess, 'CREATE_NO_WINDOW'):
+        if hasattr(subprocess, 'CREATE_NO_WINDOW'):
             # On Windows, prevent command prompts windows from popping up when
             # running in pythonw.
             extrakw['creationflags'] = getattr(subprocess, 'CREATE_NO_WINDOW')
@@ -1511,7 +1511,7 @@
             # the end of git diff arguments is used for paths
             commands.insert(1, b'--color')
         extrakw = {}
-        if util.safehasattr(subprocess, 'CREATE_NO_WINDOW'):
+        if hasattr(subprocess, 'CREATE_NO_WINDOW'):
             # On Windows, prevent command prompts windows from popping up when
             # running in pythonw.
             extrakw['creationflags'] = getattr(subprocess, 'CREATE_NO_WINDOW')
--- a/mercurial/subrepoutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/subrepoutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,7 +11,6 @@
 import re
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     config,
     error,
@@ -384,7 +383,7 @@
     Either absolute or relative to the outermost repo"""
     parent = repo
     chunks = []
-    while util.safehasattr(parent, '_subparent'):
+    while hasattr(parent, '_subparent'):
         source = urlutil.url(parent._subsource)
         chunks.append(bytes(source))
         if source.isabs():
@@ -400,7 +399,7 @@
     # type: (localrepo.localrepository) -> bytes
     """return path to this (sub)repo as seen from outermost repo"""
     parent = repo
-    while util.safehasattr(parent, '_subparent'):
+    while hasattr(parent, '_subparent'):
         parent = parent._subparent
     return repo.root[len(pathutil.normasprefix(parent.root)) :]
 
@@ -415,7 +414,7 @@
     # type: (localrepo.localrepository, bool, bool) -> Optional[bytes]
     """return pull/push path of repo - either based on parent repo .hgsub info
     or on the top repo config. Abort or return None if no source found."""
-    if util.safehasattr(repo, '_subparent'):
+    if hasattr(repo, '_subparent'):
         source = urlutil.url(repo._subsource)
         if source.isabs():
             return bytes(source)
@@ -428,7 +427,7 @@
             return bytes(parent)
     else:  # recursion reached top repo
         path = None
-        if util.safehasattr(repo, '_subtoppath'):
+        if hasattr(repo, '_subtoppath'):
             path = repo._subtoppath
         elif push and repo.ui.config(b'paths', b'default-push'):
             path = repo.ui.config(b'paths', b'default-push')
--- a/mercurial/templatefilters.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/templatefilters.py	Tue Nov 07 15:21:11 2023 +0100
@@ -339,14 +339,14 @@
         raise error.ProgrammingError(
             b'Mercurial only does output with bytes: %r' % obj
         )
-    elif util.safehasattr(obj, 'keys'):
+    elif hasattr(obj, 'keys'):
         out = [
             b'"%s": %s'
             % (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid))
             for k, v in sorted(obj.items())
         ]
         return b'{' + b', '.join(out) + b'}'
-    elif util.safehasattr(obj, '__iter__'):
+    elif hasattr(obj, '__iter__'):
         out = [json(i, paranoid) for i in obj]
         return b'[' + b', '.join(out) + b']'
     raise error.ProgrammingError(b'cannot encode %r' % obj)
--- a/mercurial/templater.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/templater.py	Tue Nov 07 15:21:11 2023 +0100
@@ -72,7 +72,6 @@
 from .i18n import _
 from .pycompat import (
     FileNotFoundError,
-    getattr,
 )
 from . import (
     config,
--- a/mercurial/templateutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/templateutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -10,7 +10,6 @@
 import types
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     error,
     pycompat,
@@ -281,7 +280,7 @@
 
     def getmember(self, context, mapping, key):
         # TODO: maybe split hybrid list/dict types?
-        if not util.safehasattr(self._values, 'get'):
+        if not hasattr(self._values, 'get'):
             raise error.ParseError(_(b'not a dictionary'))
         key = unwrapastype(context, mapping, key, self._keytype)
         return self._wrapvalue(key, self._values.get(key))
@@ -301,13 +300,13 @@
     def _wrapvalue(self, key, val):
         if val is None:
             return
-        if util.safehasattr(val, '_makemap'):
+        if hasattr(val, '_makemap'):
             # a nested hybrid list/dict, which has its own way of map operation
             return val
         return hybriditem(None, key, val, self._makemap)
 
     def filter(self, context, mapping, select):
-        if util.safehasattr(self._values, 'get'):
+        if hasattr(self._values, 'get'):
             values = {
                 k: v
                 for k, v in self._values.items()
@@ -341,7 +340,7 @@
     def tovalue(self, context, mapping):
         # TODO: make it non-recursive for trivial lists/dicts
         xs = self._values
-        if util.safehasattr(xs, 'get'):
+        if hasattr(xs, 'get'):
             return {k: unwrapvalue(context, mapping, v) for k, v in xs.items()}
         return [unwrapvalue(context, mapping, x) for x in xs]
 
@@ -858,7 +857,7 @@
         )
     elif thing is None:
         pass
-    elif not util.safehasattr(thing, '__iter__'):
+    elif not hasattr(thing, '__iter__'):
         yield pycompat.bytestr(thing)
     else:
         for i in thing:
@@ -868,7 +867,7 @@
                 yield i
             elif i is None:
                 pass
-            elif not util.safehasattr(i, '__iter__'):
+            elif not hasattr(i, '__iter__'):
                 yield pycompat.bytestr(i)
             else:
                 for j in flatten(context, mapping, i):
--- a/mercurial/testing/storage.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/testing/storage.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,7 +12,6 @@
     hex,
     nullrev,
 )
-from ..pycompat import getattr
 from .. import (
     error,
     mdiff,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/tomli/LICENSE	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Taneli Hukkinen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/tomli/README.md	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,182 @@
+[![Build Status](https://github.com/hukkin/tomli/workflows/Tests/badge.svg?branch=master)](https://github.com/hukkin/tomli/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush)
+[![codecov.io](https://codecov.io/gh/hukkin/tomli/branch/master/graph/badge.svg)](https://codecov.io/gh/hukkin/tomli)
+[![PyPI version](https://img.shields.io/pypi/v/tomli)](https://pypi.org/project/tomli)
+
+# Tomli
+
+> A lil' TOML parser
+
+**Table of Contents**  *generated with [mdformat-toc](https://github.com/hukkin/mdformat-toc)*
+
+<!-- mdformat-toc start --slug=github --maxlevel=6 --minlevel=2 -->
+
+- [Intro](#intro)
+- [Installation](#installation)
+- [Usage](#usage)
+  - [Parse a TOML string](#parse-a-toml-string)
+  - [Parse a TOML file](#parse-a-toml-file)
+  - [Handle invalid TOML](#handle-invalid-toml)
+  - [Construct `decimal.Decimal`s from TOML floats](#construct-decimaldecimals-from-toml-floats)
+- [FAQ](#faq)
+  - [Why this parser?](#why-this-parser)
+  - [Is comment preserving round-trip parsing supported?](#is-comment-preserving-round-trip-parsing-supported)
+  - [Is there a `dumps`, `write` or `encode` function?](#is-there-a-dumps-write-or-encode-function)
+  - [How do TOML types map into Python types?](#how-do-toml-types-map-into-python-types)
+- [Performance](#performance)
+
+<!-- mdformat-toc end -->
+
+## Intro<a name="intro"></a>
+
+Tomli is a Python library for parsing [TOML](https://toml.io).
+Tomli is fully compatible with [TOML v1.0.0](https://toml.io/en/v1.0.0).
+
+## Installation<a name="installation"></a>
+
+```bash
+pip install tomli
+```
+
+## Usage<a name="usage"></a>
+
+### Parse a TOML string<a name="parse-a-toml-string"></a>
+
+```python
+import tomli
+
+toml_str = """
+           gretzky = 99
+
+           [kurri]
+           jari = 17
+           """
+
+toml_dict = tomli.loads(toml_str)
+assert toml_dict == {"gretzky": 99, "kurri": {"jari": 17}}
+```
+
+### Parse a TOML file<a name="parse-a-toml-file"></a>
+
+```python
+import tomli
+
+with open("path_to_file/conf.toml", "rb") as f:
+    toml_dict = tomli.load(f)
+```
+
+The file must be opened in binary mode (with the `"rb"` flag).
+Binary mode will enforce decoding the file as UTF-8 with universal newlines disabled,
+both of which are required to correctly parse TOML.
+Support for text file objects is deprecated for removal in the next major release.
+
+### Handle invalid TOML<a name="handle-invalid-toml"></a>
+
+```python
+import tomli
+
+try:
+    toml_dict = tomli.loads("]] this is invalid TOML [[")
+except tomli.TOMLDecodeError:
+    print("Yep, definitely not valid.")
+```
+
+Note that while the `TOMLDecodeError` type is public API, error messages of raised instances of it are not.
+Error messages should not be assumed to stay constant across Tomli versions.
+
+### Construct `decimal.Decimal`s from TOML floats<a name="construct-decimaldecimals-from-toml-floats"></a>
+
+```python
+from decimal import Decimal
+import tomli
+
+toml_dict = tomli.loads("precision-matters = 0.982492", parse_float=Decimal)
+assert toml_dict["precision-matters"] == Decimal("0.982492")
+```
+
+Note that `decimal.Decimal` can be replaced with another callable that converts a TOML float from string to a Python type.
+The `decimal.Decimal` is, however, a practical choice for use cases where float inaccuracies can not be tolerated.
+
+Illegal types include `dict`, `list`, and anything that has the `append` attribute.
+Parsing floats into an illegal type results in undefined behavior.
+
+## FAQ<a name="faq"></a>
+
+### Why this parser?<a name="why-this-parser"></a>
+
+- it's lil'
+- pure Python with zero dependencies
+- the fastest pure Python parser [\*](#performance):
+  15x as fast as [tomlkit](https://pypi.org/project/tomlkit/),
+  2.4x as fast as [toml](https://pypi.org/project/toml/)
+- outputs [basic data types](#how-do-toml-types-map-into-python-types) only
+- 100% spec compliant: passes all tests in
+  [a test set](https://github.com/toml-lang/compliance/pull/8)
+  soon to be merged to the official
+  [compliance tests for TOML](https://github.com/toml-lang/compliance)
+  repository
+- thoroughly tested: 100% branch coverage
+
+### Is comment preserving round-trip parsing supported?<a name="is-comment-preserving-round-trip-parsing-supported"></a>
+
+No.
+
+The `tomli.loads` function returns a plain `dict` that is populated with builtin types and types from the standard library only.
+Preserving comments requires a custom type to be returned so will not be supported,
+at least not by the `tomli.loads` and `tomli.load` functions.
+
+Look into [TOML Kit](https://github.com/sdispater/tomlkit) if preservation of style is what you need.
+
+### Is there a `dumps`, `write` or `encode` function?<a name="is-there-a-dumps-write-or-encode-function"></a>
+
+[Tomli-W](https://github.com/hukkin/tomli-w) is the write-only counterpart of Tomli, providing `dump` and `dumps` functions.
+
+The core library does not include write capability, as most TOML use cases are read-only, and Tomli intends to be minimal.
+
+### How do TOML types map into Python types?<a name="how-do-toml-types-map-into-python-types"></a>
+
+| TOML type        | Python type         | Details                                                      |
+| ---------------- | ------------------- | ------------------------------------------------------------ |
+| Document Root    | `dict`              |                                                              |
+| Key              | `str`               |                                                              |
+| String           | `str`               |                                                              |
+| Integer          | `int`               |                                                              |
+| Float            | `float`             |                                                              |
+| Boolean          | `bool`              |                                                              |
+| Offset Date-Time | `datetime.datetime` | `tzinfo` attribute set to an instance of `datetime.timezone` |
+| Local Date-Time  | `datetime.datetime` | `tzinfo` attribute set to `None`                             |
+| Local Date       | `datetime.date`     |                                                              |
+| Local Time       | `datetime.time`     |                                                              |
+| Array            | `list`              |                                                              |
+| Table            | `dict`              |                                                              |
+| Inline Table     | `dict`              |                                                              |
+
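+A short sketch exercising a few of the mappings above:
+
+```python
+import datetime
+
+import tomli
+
+doc = tomli.loads("when = 1988-10-27T01:02:03Z\nnums = [1, 2]\n")
+assert isinstance(doc["when"], datetime.datetime)
+assert doc["when"].tzinfo is datetime.timezone.utc
+assert doc["nums"] == [1, 2]
+```
+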
+## Performance<a name="performance"></a>
+
+The `benchmark/` folder in this repository contains a performance benchmark for comparing the various Python TOML parsers.
+The benchmark can be run with `tox -e benchmark-pypi`.
+Running the benchmark on my personal computer produced the following output:
+
+```console
+foo@bar:~/dev/tomli$ tox -e benchmark-pypi
+benchmark-pypi installed: attrs==19.3.0,click==7.1.2,pytomlpp==1.0.2,qtoml==0.3.0,rtoml==0.7.0,toml==0.10.2,tomli==1.1.0,tomlkit==0.7.2
+benchmark-pypi run-test-pre: PYTHONHASHSEED='2658546909'
+benchmark-pypi run-test: commands[0] | python -c 'import datetime; print(datetime.date.today())'
+2021-07-23
+benchmark-pypi run-test: commands[1] | python --version
+Python 3.8.10
+benchmark-pypi run-test: commands[2] | python benchmark/run.py
+Parsing data.toml 5000 times:
+------------------------------------------------------
+    parser |  exec time | performance (more is better)
+-----------+------------+-----------------------------
+     rtoml |    0.901 s | baseline (100%)
+  pytomlpp |     1.08 s | 83.15%
+     tomli |     3.89 s | 23.15%
+      toml |     9.36 s | 9.63%
+     qtoml |     11.5 s | 7.82%
+   tomlkit |     56.8 s | 1.59%
+```
+
+The parsers are ordered from fastest to slowest, using the fastest parser as baseline.
+Tomli performed the best out of all pure Python TOML parsers,
+losing only to pytomlpp (wraps C++) and rtoml (wraps Rust).
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/tomli/__init__.py	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,9 @@
+"""A lil' TOML parser."""
+
+__all__ = ("loads", "load", "TOMLDecodeError")
+__version__ = "1.2.3"  # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
+
+from ._parser import TOMLDecodeError, load, loads
+
+# Pretend this exception was created here.
+TOMLDecodeError.__module__ = "tomli"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/tomli/_parser.py	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,663 @@
+import string
+from types import MappingProxyType
+from typing import Any, BinaryIO, Dict, FrozenSet, Iterable, NamedTuple, Optional, Tuple
+import warnings
+
+from ._re import (
+    RE_DATETIME,
+    RE_LOCALTIME,
+    RE_NUMBER,
+    match_to_datetime,
+    match_to_localtime,
+    match_to_number,
+)
+from ._types import Key, ParseFloat, Pos
+
+ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
+
+# Neither of these sets includes the quotation mark or the backslash. They
+# are currently handled as separate cases in the parser functions.
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
+ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
+
+ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
+ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS
+
+ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
+
+TOML_WS = frozenset(" \t")
+TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
+BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
+KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
+HEXDIGIT_CHARS = frozenset(string.hexdigits)
+
+BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
+    {
+        "\\b": "\u0008",  # backspace
+        "\\t": "\u0009",  # tab
+        "\\n": "\u000A",  # linefeed
+        "\\f": "\u000C",  # form feed
+        "\\r": "\u000D",  # carriage return
+        '\\"': "\u0022",  # quote
+        "\\\\": "\u005C",  # backslash
+    }
+)
+
+
+class TOMLDecodeError(ValueError):
+    """An error raised if a document is not valid TOML."""
+
+
+def load(fp: BinaryIO, *, parse_float: ParseFloat = float) -> Dict[str, Any]:
+    """Parse TOML from a binary file object."""
+    s_bytes = fp.read()
+    try:
+        s = s_bytes.decode()
+    except AttributeError:
+        warnings.warn(
+            "Text file object support is deprecated in favor of binary file objects."
+            ' Use `open("foo.toml", "rb")` to open the file in binary mode.',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        s = s_bytes  # type: ignore[assignment]
+    return loads(s, parse_float=parse_float)
+
+
+def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]:  # noqa: C901
+    """Parse TOML from a string."""
+
+    # The spec allows converting "\r\n" to "\n", even in string
+    # literals. Let's do so to simplify parsing.
+    src = s.replace("\r\n", "\n")
+    pos = 0
+    out = Output(NestedDict(), Flags())
+    header: Key = ()
+
+    # Parse one statement at a time
+    # (typically means one line in TOML source)
+    while True:
+        # 1. Skip line leading whitespace
+        pos = skip_chars(src, pos, TOML_WS)
+
+        # 2. Parse rules. Expect one of the following:
+        #    - end of file
+        #    - end of line
+        #    - comment
+        #    - key/value pair
+        #    - append dict to list (and move to its namespace)
+        #    - create dict (and move to its namespace)
+        # Skip trailing whitespace when applicable.
+        try:
+            char = src[pos]
+        except IndexError:
+            break
+        if char == "\n":
+            pos += 1
+            continue
+        if char in KEY_INITIAL_CHARS:
+            pos = key_value_rule(src, pos, out, header, parse_float)
+            pos = skip_chars(src, pos, TOML_WS)
+        elif char == "[":
+            try:
+                second_char: Optional[str] = src[pos + 1]
+            except IndexError:
+                second_char = None
+            if second_char == "[":
+                pos, header = create_list_rule(src, pos, out)
+            else:
+                pos, header = create_dict_rule(src, pos, out)
+            pos = skip_chars(src, pos, TOML_WS)
+        elif char != "#":
+            raise suffixed_err(src, pos, "Invalid statement")
+
+        # 3. Skip comment
+        pos = skip_comment(src, pos)
+
+        # 4. Expect end of line or end of file
+        try:
+            char = src[pos]
+        except IndexError:
+            break
+        if char != "\n":
+            raise suffixed_err(
+                src, pos, "Expected newline or end of document after a statement"
+            )
+        pos += 1
+
+    return out.data.dict
+
+
+class Flags:
+    """Flags that map to parsed keys/namespaces."""
+
+    # Marks an immutable namespace (inline array or inline table).
+    FROZEN = 0
+    # Marks a nest that has been explicitly created and can no longer
+    # be opened using the "[table]" syntax.
+    EXPLICIT_NEST = 1
+
+    def __init__(self) -> None:
+        self._flags: Dict[str, dict] = {}
+
+    def unset_all(self, key: Key) -> None:
+        cont = self._flags
+        for k in key[:-1]:
+            if k not in cont:
+                return
+            cont = cont[k]["nested"]
+        cont.pop(key[-1], None)
+
+    def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None:
+        cont = self._flags
+        for k in head_key:
+            if k not in cont:
+                cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+            cont = cont[k]["nested"]
+        for k in rel_key:
+            if k in cont:
+                cont[k]["flags"].add(flag)
+            else:
+                cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}}
+            cont = cont[k]["nested"]
+
+    def set(self, key: Key, flag: int, *, recursive: bool) -> None:  # noqa: A003
+        cont = self._flags
+        key_parent, key_stem = key[:-1], key[-1]
+        for k in key_parent:
+            if k not in cont:
+                cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+            cont = cont[k]["nested"]
+        if key_stem not in cont:
+            cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+        cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
+
+    def is_(self, key: Key, flag: int) -> bool:
+        if not key:
+            return False  # document root has no flags
+        cont = self._flags
+        for k in key[:-1]:
+            if k not in cont:
+                return False
+            inner_cont = cont[k]
+            if flag in inner_cont["recursive_flags"]:
+                return True
+            cont = inner_cont["nested"]
+        key_stem = key[-1]
+        if key_stem in cont:
+            cont = cont[key_stem]
+            return flag in cont["flags"] or flag in cont["recursive_flags"]
+        return False
+
+
+class NestedDict:
+    def __init__(self) -> None:
+        # The parsed content of the TOML document
+        self.dict: Dict[str, Any] = {}
+
+    def get_or_create_nest(
+        self,
+        key: Key,
+        *,
+        access_lists: bool = True,
+    ) -> dict:
+        cont: Any = self.dict
+        for k in key:
+            if k not in cont:
+                cont[k] = {}
+            cont = cont[k]
+            if access_lists and isinstance(cont, list):
+                cont = cont[-1]
+            if not isinstance(cont, dict):
+                raise KeyError("There is no nest behind this key")
+        return cont
+
+    def append_nest_to_list(self, key: Key) -> None:
+        cont = self.get_or_create_nest(key[:-1])
+        last_key = key[-1]
+        if last_key in cont:
+            list_ = cont[last_key]
+            try:
+                list_.append({})
+            except AttributeError:
+                raise KeyError("An object other than list found behind this key")
+        else:
+            cont[last_key] = [{}]
+
+
+class Output(NamedTuple):
+    data: NestedDict
+    flags: Flags
+
+
+def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
+    try:
+        while src[pos] in chars:
+            pos += 1
+    except IndexError:
+        pass
+    return pos
+
+
+def skip_until(
+    src: str,
+    pos: Pos,
+    expect: str,
+    *,
+    error_on: FrozenSet[str],
+    error_on_eof: bool,
+) -> Pos:
+    try:
+        new_pos = src.index(expect, pos)
+    except ValueError:
+        new_pos = len(src)
+        if error_on_eof:
+            raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None
+
+    if not error_on.isdisjoint(src[pos:new_pos]):
+        while src[pos] not in error_on:
+            pos += 1
+        raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
+    return new_pos
+
+
+def skip_comment(src: str, pos: Pos) -> Pos:
+    try:
+        char: Optional[str] = src[pos]
+    except IndexError:
+        char = None
+    if char == "#":
+        return skip_until(
+            src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
+        )
+    return pos
+
+
+def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
+    while True:
+        pos_before_skip = pos
+        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+        pos = skip_comment(src, pos)
+        if pos == pos_before_skip:
+            return pos
+
+
+def create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
+    pos += 1  # Skip "["
+    pos = skip_chars(src, pos, TOML_WS)
+    pos, key = parse_key(src, pos)
+
+    if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
+        raise suffixed_err(src, pos, f"Can not declare {key} twice")
+    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+    try:
+        out.data.get_or_create_nest(key)
+    except KeyError:
+        raise suffixed_err(src, pos, "Can not overwrite a value") from None
+
+    if not src.startswith("]", pos):
+        raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration')
+    return pos + 1, key
+
+
+def create_list_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
+    pos += 2  # Skip "[["
+    pos = skip_chars(src, pos, TOML_WS)
+    pos, key = parse_key(src, pos)
+
+    if out.flags.is_(key, Flags.FROZEN):
+        raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
+    # Free the namespace now that it points to another empty list item...
+    out.flags.unset_all(key)
+    # ...but this exact key is still prohibited from table declaration
+    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+    try:
+        out.data.append_nest_to_list(key)
+    except KeyError:
+        raise suffixed_err(src, pos, "Can not overwrite a value") from None
+
+    if not src.startswith("]]", pos):
+        raise suffixed_err(src, pos, 'Expected "]]" at the end of an array declaration')
+    return pos + 2, key
+
+
+def key_value_rule(
+    src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
+) -> Pos:
+    pos, key, value = parse_key_value_pair(src, pos, parse_float)
+    key_parent, key_stem = key[:-1], key[-1]
+    abs_key_parent = header + key_parent
+
+    if out.flags.is_(abs_key_parent, Flags.FROZEN):
+        raise suffixed_err(
+            src, pos, f"Can not mutate immutable namespace {abs_key_parent}"
+        )
+    # Containers in the relative path can't be opened with the table syntax after this
+    out.flags.set_for_relative_key(header, key, Flags.EXPLICIT_NEST)
+    try:
+        nest = out.data.get_or_create_nest(abs_key_parent)
+    except KeyError:
+        raise suffixed_err(src, pos, "Can not overwrite a value") from None
+    if key_stem in nest:
+        raise suffixed_err(src, pos, "Can not overwrite a value")
+    # Mark inline table and array namespaces recursively immutable
+    if isinstance(value, (dict, list)):
+        out.flags.set(header + key, Flags.FROZEN, recursive=True)
+    nest[key_stem] = value
+    return pos
+
+
+def parse_key_value_pair(
+    src: str, pos: Pos, parse_float: ParseFloat
+) -> Tuple[Pos, Key, Any]:
+    pos, key = parse_key(src, pos)
+    try:
+        char: Optional[str] = src[pos]
+    except IndexError:
+        char = None
+    if char != "=":
+        raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair')
+    pos += 1
+    pos = skip_chars(src, pos, TOML_WS)
+    pos, value = parse_value(src, pos, parse_float)
+    return pos, key, value
+
+
+def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]:
+    pos, key_part = parse_key_part(src, pos)
+    key: Key = (key_part,)
+    pos = skip_chars(src, pos, TOML_WS)
+    while True:
+        try:
+            char: Optional[str] = src[pos]
+        except IndexError:
+            char = None
+        if char != ".":
+            return pos, key
+        pos += 1
+        pos = skip_chars(src, pos, TOML_WS)
+        pos, key_part = parse_key_part(src, pos)
+        key += (key_part,)
+        pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]:
+    try:
+        char: Optional[str] = src[pos]
+    except IndexError:
+        char = None
+    if char in BARE_KEY_CHARS:
+        start_pos = pos
+        pos = skip_chars(src, pos, BARE_KEY_CHARS)
+        return pos, src[start_pos:pos]
+    if char == "'":
+        return parse_literal_str(src, pos)
+    if char == '"':
+        return parse_one_line_basic_str(src, pos)
+    raise suffixed_err(src, pos, "Invalid initial character for a key part")
+
+
+def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]:
+    pos += 1
+    return parse_basic_str(src, pos, multiline=False)
+
+
+def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]:
+    pos += 1
+    array: list = []
+
+    pos = skip_comments_and_array_ws(src, pos)
+    if src.startswith("]", pos):
+        return pos + 1, array
+    while True:
+        pos, val = parse_value(src, pos, parse_float)
+        array.append(val)
+        pos = skip_comments_and_array_ws(src, pos)
+
+        c = src[pos : pos + 1]
+        if c == "]":
+            return pos + 1, array
+        if c != ",":
+            raise suffixed_err(src, pos, "Unclosed array")
+        pos += 1
+
+        pos = skip_comments_and_array_ws(src, pos)
+        if src.startswith("]", pos):
+            return pos + 1, array
+
+
+def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]:
+    pos += 1
+    nested_dict = NestedDict()
+    flags = Flags()
+
+    pos = skip_chars(src, pos, TOML_WS)
+    if src.startswith("}", pos):
+        return pos + 1, nested_dict.dict
+    while True:
+        pos, key, value = parse_key_value_pair(src, pos, parse_float)
+        key_parent, key_stem = key[:-1], key[-1]
+        if flags.is_(key, Flags.FROZEN):
+            raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
+        try:
+            nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
+        except KeyError:
+            raise suffixed_err(src, pos, "Can not overwrite a value") from None
+        if key_stem in nest:
+            raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
+        nest[key_stem] = value
+        pos = skip_chars(src, pos, TOML_WS)
+        c = src[pos : pos + 1]
+        if c == "}":
+            return pos + 1, nested_dict.dict
+        if c != ",":
+            raise suffixed_err(src, pos, "Unclosed inline table")
+        if isinstance(value, (dict, list)):
+            flags.set(key, Flags.FROZEN, recursive=True)
+        pos += 1
+        pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_basic_str_escape(  # noqa: C901
+    src: str, pos: Pos, *, multiline: bool = False
+) -> Tuple[Pos, str]:
+    escape_id = src[pos : pos + 2]
+    pos += 2
+    if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
+        # Skip whitespace until next non-whitespace character or end of
+        # the doc. Error if non-whitespace is found before newline.
+        if escape_id != "\\\n":
+            pos = skip_chars(src, pos, TOML_WS)
+            try:
+                char = src[pos]
+            except IndexError:
+                return pos, ""
+            if char != "\n":
+                raise suffixed_err(src, pos, 'Unescaped "\\" in a string')
+            pos += 1
+        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+        return pos, ""
+    if escape_id == "\\u":
+        return parse_hex_char(src, pos, 4)
+    if escape_id == "\\U":
+        return parse_hex_char(src, pos, 8)
+    try:
+        return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
+    except KeyError:
+        if len(escape_id) != 2:
+            raise suffixed_err(src, pos, "Unterminated string") from None
+        raise suffixed_err(src, pos, 'Unescaped "\\" in a string') from None
+
+
+def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]:
+    return parse_basic_str_escape(src, pos, multiline=True)
+
+
+def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]:
+    hex_str = src[pos : pos + hex_len]
+    if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
+        raise suffixed_err(src, pos, "Invalid hex value")
+    pos += hex_len
+    hex_int = int(hex_str, 16)
+    if not is_unicode_scalar_value(hex_int):
+        raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
+    return pos, chr(hex_int)
+
+
+def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]:
+    pos += 1  # Skip starting apostrophe
+    start_pos = pos
+    pos = skip_until(
+        src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
+    )
+    return pos + 1, src[start_pos:pos]  # Skip ending apostrophe
+
+
+def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]:
+    pos += 3
+    if src.startswith("\n", pos):
+        pos += 1
+
+    if literal:
+        delim = "'"
+        end_pos = skip_until(
+            src,
+            pos,
+            "'''",
+            error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
+            error_on_eof=True,
+        )
+        result = src[pos:end_pos]
+        pos = end_pos + 3
+    else:
+        delim = '"'
+        pos, result = parse_basic_str(src, pos, multiline=True)
+
+    # Add at most two extra apostrophes/quotes if the end sequence
+    # is 4 or 5 chars long instead of just 3.
+    if not src.startswith(delim, pos):
+        return pos, result
+    pos += 1
+    if not src.startswith(delim, pos):
+        return pos, result + delim
+    pos += 1
+    return pos, result + (delim * 2)
+
+
+def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]:
+    if multiline:
+        error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
+        parse_escapes = parse_basic_str_escape_multiline
+    else:
+        error_on = ILLEGAL_BASIC_STR_CHARS
+        parse_escapes = parse_basic_str_escape
+    result = ""
+    start_pos = pos
+    while True:
+        try:
+            char = src[pos]
+        except IndexError:
+            raise suffixed_err(src, pos, "Unterminated string") from None
+        if char == '"':
+            if not multiline:
+                return pos + 1, result + src[start_pos:pos]
+            if src.startswith('"""', pos):
+                return pos + 3, result + src[start_pos:pos]
+            pos += 1
+            continue
+        if char == "\\":
+            result += src[start_pos:pos]
+            pos, parsed_escape = parse_escapes(src, pos)
+            result += parsed_escape
+            start_pos = pos
+            continue
+        if char in error_on:
+            raise suffixed_err(src, pos, f"Illegal character {char!r}")
+        pos += 1
+
+
+def parse_value(  # noqa: C901
+    src: str, pos: Pos, parse_float: ParseFloat
+) -> Tuple[Pos, Any]:
+    try:
+        char: Optional[str] = src[pos]
+    except IndexError:
+        char = None
+
+    # Basic strings
+    if char == '"':
+        if src.startswith('"""', pos):
+            return parse_multiline_str(src, pos, literal=False)
+        return parse_one_line_basic_str(src, pos)
+
+    # Literal strings
+    if char == "'":
+        if src.startswith("'''", pos):
+            return parse_multiline_str(src, pos, literal=True)
+        return parse_literal_str(src, pos)
+
+    # Booleans
+    if char == "t":
+        if src.startswith("true", pos):
+            return pos + 4, True
+    if char == "f":
+        if src.startswith("false", pos):
+            return pos + 5, False
+
+    # Dates and times
+    datetime_match = RE_DATETIME.match(src, pos)
+    if datetime_match:
+        try:
+            datetime_obj = match_to_datetime(datetime_match)
+        except ValueError as e:
+            raise suffixed_err(src, pos, "Invalid date or datetime") from e
+        return datetime_match.end(), datetime_obj
+    localtime_match = RE_LOCALTIME.match(src, pos)
+    if localtime_match:
+        return localtime_match.end(), match_to_localtime(localtime_match)
+
+    # Integers and "normal" floats.
+    # The regex will greedily match any type starting with a decimal
+    # char, so it needs to be located after handling of dates and times.
+    number_match = RE_NUMBER.match(src, pos)
+    if number_match:
+        return number_match.end(), match_to_number(number_match, parse_float)
+
+    # Arrays
+    if char == "[":
+        return parse_array(src, pos, parse_float)
+
+    # Inline tables
+    if char == "{":
+        return parse_inline_table(src, pos, parse_float)
+
+    # Special floats
+    first_three = src[pos : pos + 3]
+    if first_three in {"inf", "nan"}:
+        return pos + 3, parse_float(first_three)
+    first_four = src[pos : pos + 4]
+    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
+        return pos + 4, parse_float(first_four)
+
+    raise suffixed_err(src, pos, "Invalid value")
+
+
+def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
+    """Return a `TOMLDecodeError` where error message is suffixed with
+    coordinates in source."""
+
+    def coord_repr(src: str, pos: Pos) -> str:
+        if pos >= len(src):
+            return "end of document"
+        line = src.count("\n", 0, pos) + 1
+        if line == 1:
+            column = pos + 1
+        else:
+            column = pos - src.rindex("\n", 0, pos)
+        return f"line {line}, column {column}"
+
+    return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
+
+
+def is_unicode_scalar_value(codepoint: int) -> bool:
+    return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/tomli/_re.py	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,101 @@
+from datetime import date, datetime, time, timedelta, timezone, tzinfo
+from functools import lru_cache
+import re
+from typing import Any, Optional, Union
+
+from ._types import ParseFloat
+
+# E.g.
+# - 00:32:00.999999
+# - 00:32:00
+_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
+
+RE_NUMBER = re.compile(
+    r"""
+0
+(?:
+    x[0-9A-Fa-f](?:_?[0-9A-Fa-f])*   # hex
+    |
+    b[01](?:_?[01])*                 # bin
+    |
+    o[0-7](?:_?[0-7])*               # oct
+)
+|
+[+-]?(?:0|[1-9](?:_?[0-9])*)         # dec, integer part
+(?P<floatpart>
+    (?:\.[0-9](?:_?[0-9])*)?         # optional fractional part
+    (?:[eE][+-]?[0-9](?:_?[0-9])*)?  # optional exponent part
+)
+""",
+    flags=re.VERBOSE,
+)
+RE_LOCALTIME = re.compile(_TIME_RE_STR)
+RE_DATETIME = re.compile(
+    fr"""
+([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])  # date, e.g. 1988-10-27
+(?:
+    [Tt ]
+    {_TIME_RE_STR}
+    (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?  # optional time offset
+)?
+""",
+    flags=re.VERBOSE,
+)
+
+
+def match_to_datetime(match: "re.Match") -> Union[datetime, date]:
+    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
+
+    Raises ValueError if the match does not correspond to a valid date
+    or datetime.
+    """
+    (
+        year_str,
+        month_str,
+        day_str,
+        hour_str,
+        minute_str,
+        sec_str,
+        micros_str,
+        zulu_time,
+        offset_sign_str,
+        offset_hour_str,
+        offset_minute_str,
+    ) = match.groups()
+    year, month, day = int(year_str), int(month_str), int(day_str)
+    if hour_str is None:
+        return date(year, month, day)
+    hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
+    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
+    if offset_sign_str:
+        tz: Optional[tzinfo] = cached_tz(
+            offset_hour_str, offset_minute_str, offset_sign_str
+        )
+    elif zulu_time:
+        tz = timezone.utc
+    else:  # local date-time
+        tz = None
+    return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
+
+
+@lru_cache(maxsize=None)
+def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
+    sign = 1 if sign_str == "+" else -1
+    return timezone(
+        timedelta(
+            hours=sign * int(hour_str),
+            minutes=sign * int(minute_str),
+        )
+    )
+
+
+def match_to_localtime(match: "re.Match") -> time:
+    hour_str, minute_str, sec_str, micros_str = match.groups()
+    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
+    return time(int(hour_str), int(minute_str), int(sec_str), micros)
+
+
+def match_to_number(match: "re.Match", parse_float: "ParseFloat") -> Any:
+    if match.group("floatpart"):
+        return parse_float(match.group())
+    return int(match.group(), 0)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/tomli/_types.py	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,6 @@
+from typing import Any, Callable, Tuple
+
+# Type annotations
+ParseFloat = Callable[[str], Any]
+Key = Tuple[str, ...]
+Pos = int
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/tomli/py.typed	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,1 @@
+# Marker file for PEP 561
--- a/mercurial/transaction.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/transaction.py	Tue Nov 07 15:21:11 2023 +0100
@@ -59,6 +59,11 @@
 ]
 
 
+def has_abandoned_transaction(repo):
+    """Return True if the repo has an abandoned transaction"""
+    return os.path.exists(repo.sjoin(b"journal"))
+
+
 def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
     """remove "undo" files used by the rollback logic
 
@@ -868,7 +873,7 @@
                 self._vfsmap,
                 entries,
                 self._backupentries,
-                False,
+                unlink=True,
                 checkambigfiles=self._checkambigfiles,
             )
             self._report(_(b"rollback completed\n"))
--- a/mercurial/ui.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/ui.py	Tue Nov 07 15:21:11 2023 +0100
@@ -37,7 +37,6 @@
 from .i18n import _
 from .node import hex
 from .pycompat import (
-    getattr,
     open,
 )
 
@@ -47,6 +46,7 @@
     configitems,
     encoding,
     error,
+    extensions,
     formatter,
     loggingutil,
     progress,
@@ -659,6 +659,12 @@
         item = self._knownconfig.get(section, {}).get(name)
         alternates = [(section, name)]
 
+        if item is not None and item.in_core_extension is not None:
+            # Only return the default for an in-core extension item if said
+            # extension is enabled
+            if item.in_core_extension in extensions.extensions(self):
+                item = None
+
         if item is not None:
             alternates.extend(item.alias)
             if callable(item.default):
@@ -1460,7 +1466,7 @@
         self.flush()
 
         wasformatted = self.formatted()
-        if util.safehasattr(signal, b"SIGPIPE"):
+        if hasattr(signal, "SIGPIPE"):
             signal.signal(signal.SIGPIPE, _catchterm)
         if self._runpager(pagercmd, pagerenv):
             self.pageractive = True
@@ -1531,8 +1537,9 @@
             raise
 
         # back up original file descriptors
-        stdoutfd = os.dup(procutil.stdout.fileno())
-        stderrfd = os.dup(procutil.stderr.fileno())
+        if pycompat.sysplatform != b'OpenVMS':
+            stdoutfd = os.dup(procutil.stdout.fileno())
+            stderrfd = os.dup(procutil.stderr.fileno())
 
         os.dup2(pager.stdin.fileno(), procutil.stdout.fileno())
         if self._isatty(procutil.stderr):
@@ -1540,9 +1547,11 @@
 
         @self.atexit
         def killpager():
-            if util.safehasattr(signal, b"SIGINT"):
+            if hasattr(signal, "SIGINT"):
                 signal.signal(signal.SIGINT, signal.SIG_IGN)
             # restore original fds, closing pager.stdin copies in the process
+            if pycompat.sysplatform == b'OpenVMS':
+                pager.kill()
             os.dup2(stdoutfd, procutil.stdout.fileno())
             os.dup2(stderrfd, procutil.stderr.fileno())
             pager.stdin.close()
--- a/mercurial/unionrepo.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/unionrepo.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,9 +11,10 @@
 allowing operations like diff and log with revsets.
 """
 
+import contextlib
+
 
 from .i18n import _
-from .pycompat import getattr
 
 from . import (
     changelog,
@@ -113,7 +114,20 @@
             self.bundlerevs.add(n)
             n += 1
 
-    def _chunk(self, rev, df=None):
+    @contextlib.contextmanager
+    def reading(self):
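+        # Enter the local revlog's reading context only when some revisions
+        # actually live in it (i.e. not everything comes from the bundle),
+        # and revlog2's only when the bundle contributed any revision.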
+        if len(self.bundlerevs) < len(self.index):
+            read_1 = super().reading
+        else:
+            read_1 = util.nullcontextmanager
+        if 0 < len(self.bundlerevs):
+            read_2 = self.revlog2.reading
+        else:
+            read_2 = util.nullcontextmanager
+        with read_1(), read_2():
+            yield
+
+    def _chunk(self, rev):
         if rev <= self.repotiprev:
             return revlog.revlog._chunk(self, rev)
         return self.revlog2._chunk(self.node(rev))
@@ -130,7 +144,7 @@
 
         return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
 
-    def _revisiondata(self, nodeorrev, _df=None, raw=False):
+    def _revisiondata(self, nodeorrev, raw=False):
         if isinstance(nodeorrev, int):
             rev = nodeorrev
             node = self.node(rev)
@@ -144,7 +158,7 @@
             func = revlog2._revisiondata
         else:
             func = super(unionrevlog, self)._revisiondata
-        return func(node, _df=_df, raw=raw)
+        return func(node, raw=raw)
 
     def addrevision(
         self,
@@ -191,6 +205,8 @@
 
 class unionmanifest(unionrevlog, manifest.manifestrevlog):
     def __init__(self, nodeconstants, opener, opener2, linkmapper):
+        # XXX manifestrevlog is not actually a revlog, so mixing it with
+        # bundlerevlog is not a good idea.
         manifest.manifestrevlog.__init__(self, nodeconstants, opener)
         manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
         unionrevlog.__init__(
--- a/mercurial/upgrade_utils/actions.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/upgrade_utils/actions.py	Tue Nov 07 15:21:11 2023 +0100
@@ -670,7 +670,7 @@
     newactions = []
 
     for d in format_upgrades:
-        if util.safehasattr(d, '_requirement'):
+        if hasattr(d, '_requirement'):
             name = d._requirement
         else:
             name = None
--- a/mercurial/upgrade_utils/engine.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/upgrade_utils/engine.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
 import stat
 
 from ..i18n import _
-from ..pycompat import getattr
 from .. import (
     error,
     metadata,
--- a/mercurial/url.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/url.py	Tue Nov 07 15:21:11 2023 +0100
@@ -190,7 +190,7 @@
     return _sendfile
 
 
-has_https = util.safehasattr(urlreq, 'httpshandler')
+has_https = hasattr(urlreq, 'httpshandler')
 
 
 class httpconnection(keepalive.HTTPConnection):
@@ -222,47 +222,6 @@
     h.headers = None
 
 
-def _generic_proxytunnel(self: "httpsconnection"):
-    headers = self.headers  # pytype: disable=attribute-error
-    proxyheaders = {
-        pycompat.bytestr(x): pycompat.bytestr(headers[x])
-        for x in headers
-        if x.lower().startswith('proxy-')
-    }
-    realhostport = self.realhostport  # pytype: disable=attribute-error
-    self.send(b'CONNECT %s HTTP/1.0\r\n' % realhostport)
-
-    for header in proxyheaders.items():
-        self.send(b'%s: %s\r\n' % header)
-    self.send(b'\r\n')
-
-    # majority of the following code is duplicated from
-    # httplib.HTTPConnection as there are no adequate places to
-    # override functions to provide the needed functionality.
-
-    # pytype: disable=attribute-error
-    res = self.response_class(self.sock, method=self._method)
-    # pytype: enable=attribute-error
-
-    while True:
-        # pytype: disable=attribute-error
-        version, status, reason = res._read_status()
-        # pytype: enable=attribute-error
-        if status != httplib.CONTINUE:
-            break
-        # skip lines that are all whitespace
-        list(iter(lambda: res.fp.readline().strip(), b''))
-
-    if status == 200:
-        # skip lines until we find a blank line
-        list(iter(res.fp.readline, b'\r\n'))
-    else:
-        self.close()
-        raise socket.error(
-            "Tunnel connection failed: %d %s" % (status, reason.strip())
-        )
-
-
 class httphandler(keepalive.HTTPHandler):
     def http_open(self, req):
         return self.do_open(httpconnection, req)
@@ -306,6 +265,46 @@
 
 if has_https:
 
+    def _generic_proxytunnel(self: "httpsconnection"):
+        headers = self.headers  # pytype: disable=attribute-error
+        proxyheaders = {
+            pycompat.bytestr(x): pycompat.bytestr(headers[x])
+            for x in headers
+            if x.lower().startswith('proxy-')
+        }
+        realhostport = self.realhostport  # pytype: disable=attribute-error
+        self.send(b'CONNECT %s HTTP/1.0\r\n' % realhostport)
+
+        for header in proxyheaders.items():
+            self.send(b'%s: %s\r\n' % header)
+        self.send(b'\r\n')
+
+        # majority of the following code is duplicated from
+        # httplib.HTTPConnection as there are no adequate places to
+        # override functions to provide the needed functionality.
+
+        # pytype: disable=attribute-error
+        res = self.response_class(self.sock, method=self._method)
+        # pytype: enable=attribute-error
+
+        while True:
+            # pytype: disable=attribute-error
+            version, status, reason = res._read_status()
+            # pytype: enable=attribute-error
+            if status != httplib.CONTINUE:
+                break
+            # skip lines that are all whitespace
+            list(iter(lambda: res.fp.readline().strip(), b''))
+
+        if status == 200:
+            # skip lines until we find a blank line
+            list(iter(res.fp.readline, b'\r\n'))
+        else:
+            self.close()
+            raise socket.error(
+                "Tunnel connection failed: %d %s" % (status, reason.strip())
+            )
+
     class httpsconnection(keepalive.HTTPConnection):
         response_class = keepalive.HTTPResponse
         default_port = httplib.HTTPS_PORT
@@ -542,7 +541,10 @@
     else:
         handlers.append(httphandler(timeout=timeout))
         if has_https:
-            handlers.append(httpshandler(ui, timeout=timeout))
+            # pytype gets confused about the conditional existence of httpshandler here.
+            handlers.append(
+                httpshandler(ui, timeout=timeout)  # pytype: disable=name-error
+            )
 
     handlers.append(proxyhandler(ui))
 
--- a/mercurial/urllibcompat.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/urllibcompat.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,7 +11,6 @@
 import urllib.request
 import urllib.response
 
-from .pycompat import getattr
 from . import pycompat
 
 _sysstr = pycompat.sysstr
--- a/mercurial/util.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/util.py	Tue Nov 07 15:21:11 2023 +0100
@@ -37,10 +37,7 @@
 from .node import hex
 from .thirdparty import attr
 from .pycompat import (
-    delattr,
-    getattr,
     open,
-    setattr,
 )
 from hgdemandimport import tracing
 from . import (
@@ -2583,7 +2580,7 @@
             self._fp.close()
 
     def __del__(self):
-        if safehasattr(self, '_fp'):  # constructor actually did something
+        if hasattr(self, '_fp'):  # constructor actually did something
             self.discard()
 
     def __enter__(self):
--- a/mercurial/utils/compression.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/utils/compression.py	Tue Nov 07 15:21:11 2023 +0100
@@ -8,7 +8,6 @@
 import collections
 import zlib
 
-from ..pycompat import getattr
 from .. import (
     error,
     i18n,
@@ -16,8 +15,6 @@
 )
 from . import stringutil
 
-safehasattr = pycompat.safehasattr
-
 
 _ = i18n._
 
@@ -185,7 +182,7 @@
         """
         assert role in (SERVERROLE, CLIENTROLE)
 
-        attr = b'serverpriority' if role == SERVERROLE else b'clientpriority'
+        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
 
         engines = [self._engines[e] for e in self._wiretypes.values()]
         if onlyavailable:
@@ -340,7 +337,7 @@
 
 class _CompressedStreamReader:
     def __init__(self, fh):
-        if safehasattr(fh, 'unbufferedread'):
+        if hasattr(fh, 'unbufferedread'):
             self._reader = fh.unbufferedread
         else:
             self._reader = fh.read
--- a/mercurial/utils/procutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/utils/procutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -24,7 +24,6 @@
 
 from ..i18n import _
 from ..pycompat import (
-    getattr,
     open,
 )
 
@@ -182,7 +181,7 @@
     # Work around Windows bugs.
     stdout = platform.winstdout(stdout)  # pytype: disable=module-attr
     stderr = platform.winstdout(stderr)  # pytype: disable=module-attr
-if isatty(stdout):
+if isatty(stdout) and pycompat.sysplatform != b'OpenVMS':
     # The standard library doesn't offer line-buffered binary streams.
     stdout = make_line_buffered(stdout)
 
@@ -209,7 +208,7 @@
 except AttributeError:
     pass
 
-closefds = pycompat.isposix
+closefds = pycompat.isposix and pycompat.sysplatform != b'OpenVMS'
 
 
 def explainexit(code):
@@ -339,8 +338,6 @@
         cmd = cmd.replace(b'INFILE', inname)
         cmd = cmd.replace(b'OUTFILE', outname)
         code = system(cmd)
-        if pycompat.sysplatform == b'OpenVMS' and code & 1:
-            code = 0
         if code:
             raise error.Abort(
                 _(b"command '%s' failed: %s") % (cmd, explainexit(code))
@@ -384,8 +381,10 @@
     Defaults to $HG or 'hg' in the search path.
     """
     if _hgexecutable is None:
-        hg = encoding.environ.get(b'HG')
+        hg = encoding.environ.get(b'HG', b'')
         mainmod = sys.modules['__main__']
+        if pycompat.sysplatform == b'OpenVMS' and hg[0:1] == b'$':
+            hg = b'mcr ' + hg[1:]
         if hg:
             _sethgexecutable(hg)
         elif resourceutil.mainfrozen():
@@ -534,8 +533,6 @@
             out.write(line)
         proc.wait()
         rc = proc.returncode
-    if pycompat.sysplatform == b'OpenVMS' and rc & 1:
-        rc = 0
     return rc
 
 
--- a/mercurial/utils/resourceutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/utils/resourceutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -22,8 +22,8 @@
     (portable, not much used).
     """
     return (
-        pycompat.safehasattr(sys, "frozen")  # new py2exe
-        or pycompat.safehasattr(sys, "importers")  # old py2exe
+        hasattr(sys, "frozen")  # new py2exe
+        or hasattr(sys, "importers")  # old py2exe
         or _imp.is_frozen("__main__")  # tools/freeze
     )
 
@@ -59,7 +59,7 @@
     from importlib import resources  # pytype: disable=import-error
 
     # Force loading of the resources module
-    if pycompat.safehasattr(resources, 'files'):
+    if hasattr(resources, 'files'):
         resources.files  # pytype: disable=module-attr
     else:
         resources.open_binary  # pytype: disable=module-attr
@@ -95,7 +95,7 @@
     from .. import encoding
 
     def open_resource(package, name):
-        if pycompat.safehasattr(resources, 'files'):
+        if hasattr(resources, 'files'):
             return (
                 resources.files(  # pytype: disable=module-attr
                     pycompat.sysstr(package)
--- a/mercurial/utils/urlutil.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/utils/urlutil.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,15 +9,12 @@
 import socket
 
 from ..i18n import _
-from ..pycompat import (
-    getattr,
-    setattr,
-)
 from .. import (
     encoding,
     error,
     pycompat,
     urllibcompat,
+    util,
 )
 
 from . import (
@@ -257,18 +254,20 @@
     def __repr__(self):
         attrs = []
         for a in (
-            b'scheme',
-            b'user',
-            b'passwd',
-            b'host',
-            b'port',
-            b'path',
-            b'query',
-            b'fragment',
+            'scheme',
+            'user',
+            'passwd',
+            'host',
+            'port',
+            'path',
+            'query',
+            'fragment',
         ):
             v = getattr(self, a)
             if v is not None:
-                attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
+                line = b'%s: %r'
+                line %= (pycompat.bytestr(a), pycompat.bytestr(v))
+                attrs.append(line)
         return b'<url %s>' % b', '.join(attrs)
 
     def __bytes__(self):
@@ -679,6 +678,10 @@
     This decorator can be used to perform additional verification of
     sub-options and to change the type of sub-options.
     """
+    if isinstance(attr, bytes):
+        msg = b'pathsuboption takes `str` as "attr" argument, not `bytes`'
+        util.nouideprecwarn(msg, b"6.6", stacklevel=2)
+        attr = attr.decode('ascii')
 
     def register(func):
         _pathsuboptions[option] = (attr, func)
@@ -693,7 +696,7 @@
     return b'yes' if value else b'no'
 
 
-@pathsuboption(b'pushurl', b'_pushloc')
+@pathsuboption(b'pushurl', '_pushloc')
 def pushurlpathoption(ui, path, value):
     u = url(value)
     # Actually require a URL.
@@ -718,7 +721,7 @@
     return bytes(u)
 
 
-@pathsuboption(b'pushrev', b'pushrev')
+@pathsuboption(b'pushrev', 'pushrev')
 def pushrevpathoption(ui, path, value):
     return value
 
@@ -730,7 +733,7 @@
 }
 
 
-@pathsuboption(b'bookmarks.mode', b'bookmarks_mode')
+@pathsuboption(b'bookmarks.mode', 'bookmarks_mode')
 def bookmarks_mode_option(ui, path, value):
     if value not in SUPPORTED_BOOKMARKS_MODES:
         path_name = path.name
@@ -756,7 +759,7 @@
 
 @pathsuboption(
     b'pulled-delta-reuse-policy',
-    b'delta_reuse_policy',
+    'delta_reuse_policy',
     display=DELTA_REUSE_POLICIES_NAME.get,
 )
 def delta_reuse_policy(ui, path, value):
@@ -773,7 +776,7 @@
     return DELTA_REUSE_POLICIES.get(value)
 
 
-@pathsuboption(b'multi-urls', b'multi_urls', display=display_bool)
+@pathsuboption(b'multi-urls', 'multi_urls', display=display_bool)
 def multiurls_pathoption(ui, path, value):
     res = stringutil.parsebool(value)
     if res is None:
--- a/mercurial/verify.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/verify.py	Tue Nov 07 15:21:11 2023 +0100
@@ -17,6 +17,7 @@
     pycompat,
     requirements,
     revlog,
+    transaction,
     util,
 )
 
@@ -195,7 +196,7 @@
         if not repo.url().startswith(b'file:'):
             raise error.Abort(_(b"cannot verify bundle or remote repos"))
 
-        if os.path.exists(repo.sjoin(b"journal")):
+        if transaction.has_abandoned_transaction(repo):
             ui.warn(_(b"abandoned transaction found - run hg recover\n"))
 
         if ui.verbose or not self.revlogv1:
@@ -269,22 +270,23 @@
         progress = ui.makeprogress(
             _(b'checking'), unit=_(b'changesets'), total=len(repo)
         )
-        for i in repo:
-            progress.update(i)
-            n = cl.node(i)
-            self._checkentry(cl, i, n, seen, [i], b"changelog")
+        with cl.reading():
+            for i in repo:
+                progress.update(i)
+                n = cl.node(i)
+                self._checkentry(cl, i, n, seen, [i], b"changelog")
 
-            try:
-                changes = cl.read(n)
-                if changes[0] != self.repo.nullid:
-                    mflinkrevs.setdefault(changes[0], []).append(i)
+                try:
+                    changes = cl.read(n)
+                    if changes[0] != self.repo.nullid:
+                        mflinkrevs.setdefault(changes[0], []).append(i)
+                        self.refersmf = True
+                    for f in changes[3]:
+                        if match(f):
+                            filelinkrevs.setdefault(_normpath(f), []).append(i)
+                except Exception as inst:
                     self.refersmf = True
-                for f in changes[3]:
-                    if match(f):
-                        filelinkrevs.setdefault(_normpath(f), []).append(i)
-            except Exception as inst:
-                self.refersmf = True
-                self._exc(i, _(b"unpacking changeset %s") % short(n), inst)
+                    self._exc(i, _(b"unpacking changeset %s") % short(n), inst)
         progress.complete()
         return mflinkrevs, filelinkrevs
 
--- a/mercurial/vfs.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/vfs.py	Tue Nov 07 15:21:11 2023 +0100
@@ -16,11 +16,6 @@
 )
 
 from .i18n import _
-from .pycompat import (
-    delattr,
-    getattr,
-    setattr,
-)
 from . import (
     encoding,
     error,
--- a/mercurial/windows.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/windows.py	Tue Nov 07 15:21:11 2023 +0100
@@ -33,7 +33,6 @@
 )
 
 from .i18n import _
-from .pycompat import getattr
 from . import (
     encoding,
     error,
--- a/mercurial/wireprotoframing.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/wireprotoframing.py	Tue Nov 07 15:21:11 2023 +0100
@@ -14,7 +14,6 @@
 import struct
 
 from .i18n import _
-from .pycompat import getattr
 from .thirdparty import attr
 from . import (
     encoding,
--- a/mercurial/wireprototypes.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/wireprototypes.py	Tue Nov 07 15:21:11 2023 +0100
@@ -9,7 +9,6 @@
     hex,
 )
 from .i18n import _
-from .pycompat import getattr
 from .thirdparty import attr
 from . import (
     error,
@@ -367,9 +366,7 @@
     # No explicit config. Filter out the ones that aren't supposed to be
     # advertised and return default ordering.
     if not configengines:
-        attr = (
-            b'serverpriority' if role == util.SERVERROLE else b'clientpriority'
-        )
+        attr = 'serverpriority' if role == util.SERVERROLE else 'clientpriority'
         return [
             e for e in compengines if getattr(e.wireprotosupport(), attr) > 0
         ]
--- a/mercurial/wireprotov1peer.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/wireprotov1peer.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,10 +12,6 @@
 from concurrent import futures
 from .i18n import _
 from .node import bin
-from .pycompat import (
-    getattr,
-    setattr,
-)
 from . import (
     bundle2,
     changegroup as changegroupmod,
@@ -499,7 +495,7 @@
         else:
             heads = wireprototypes.encodelist(heads)
 
-        if util.safehasattr(bundle, 'deltaheader'):
+        if hasattr(bundle, 'deltaheader'):
             # this a bundle10, do the old style call sequence
             ret, output = self._callpush(b"unbundle", bundle, heads=heads)
             if ret == b"":
--- a/mercurial/wireprotov1server.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/wireprotov1server.py	Tue Nov 07 15:21:11 2023 +0100
@@ -11,7 +11,6 @@
 
 from .i18n import _
 from .node import hex
-from .pycompat import getattr
 
 from . import (
     bundle2,
@@ -721,7 +720,7 @@
                 r = exchange.unbundle(
                     repo, gen, their_heads, b'serve', proto.client()
                 )
-                if util.safehasattr(r, 'addpart'):
+                if hasattr(r, 'addpart'):
                     # The return looks streamable, we are in the bundle2 case
                     # and should return a stream.
                     return wireprototypes.streamreslegacy(gen=r.getchunks())
--- a/mercurial/worker.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/mercurial/worker.py	Tue Nov 07 15:21:11 2023 +0100
@@ -61,7 +61,9 @@
     return threading.current_thread() == threading.main_thread()
 
 
-if pycompat.isposix or pycompat.iswindows:
+if (
+    pycompat.isposix and pycompat.sysplatform != b'OpenVMS'
+) or pycompat.iswindows:
     _STARTUP_COST = 0.01
     # The Windows worker is thread based. If tasks are CPU bound, threads
     # in the presence of the GIL result in excessive context switching and
--- a/relnotes/next	Mon Nov 06 15:38:27 2023 +0100
+++ b/relnotes/next	Tue Nov 07 15:21:11 2023 +0100
@@ -13,6 +13,8 @@
 
 == Backwards Compatibility Changes ==
 
+* remove the experimental infinitepush extension
+
 == Internal API Changes ==
 
 == Miscellaneous ==
--- a/rust/Cargo.lock	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/Cargo.lock	Tue Nov 07 15:21:11 2023 +0100
@@ -476,6 +476,12 @@
 
 [[package]]
 name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
 version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038"
@@ -517,7 +523,7 @@
  "derive_more",
  "flate2",
  "format-bytes",
- "hashbrown",
+ "hashbrown 0.13.1",
  "home",
  "im-rc",
  "itertools",
@@ -535,9 +541,11 @@
  "regex",
  "same-file",
  "self_cell",
+ "serde",
  "sha-1 0.10.0",
  "tempfile",
  "thread_local",
+ "toml",
  "twox-hash",
  "zstd",
 ]
@@ -610,6 +618,16 @@
 ]
 
 [[package]]
+name = "indexmap"
+version = "1.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+]
+
+[[package]]
 name = "instant"
 version = "0.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -749,6 +767,15 @@
 ]
 
 [[package]]
+name = "nom8"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
 name = "num-integer"
 version = "0.1.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1107,6 +1134,35 @@
 checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4"
 
 [[package]]
+name = "serde"
+version = "1.0.152"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.152"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_spanned"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4"
+dependencies = [
+ "serde",
+]
+
+[[package]]
 name = "sha-1"
 version = "0.9.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1160,9 +1216,9 @@
 
 [[package]]
 name = "syn"
-version = "1.0.103"
+version = "1.0.109"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1213,6 +1269,40 @@
 ]
 
 [[package]]
+name = "toml"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb9d890e4dc9298b70f740f615f2e05b9db37dce531f6b24fb77ac993f9f217"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
+[[package]]
+name = "toml_datetime"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "toml_edit"
+version = "0.18.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56c59d8dd7d0dcbc6428bf7aa2f0e823e26e43b3c9aca15bbc9475d23e5fa12b"
+dependencies = [
+ "indexmap",
+ "nom8",
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+]
+
+[[package]]
 name = "twox-hash"
 version = "1.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
--- a/rust/Cargo.toml	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/Cargo.toml	Tue Nov 07 15:21:11 2023 +0100
@@ -1,3 +1,4 @@
 [workspace]
 members = ["hg-core", "hg-cpython", "rhg"]
 exclude = ["chg", "hgcli"]
+resolver = "2"
--- a/rust/hg-core/Cargo.toml	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/Cargo.toml	Tue Nov 07 15:21:11 2023 +0100
@@ -26,10 +26,12 @@
 rayon = "1.7.0"
 regex = "1.7.0"
 self_cell = "1.0"
+serde = { version = "1.0", features = ["derive"] }
 sha-1 = "0.10.0"
 twox-hash = "1.6.3"
 same-file = "1.0.6"
 tempfile = "3.3.0"
+toml = "0.6"
 thread_local = "1.1.4"
 crossbeam-channel = "0.5.6"
 log = "0.4.17"
@@ -46,5 +48,5 @@
 default-features = false
 
 [dev-dependencies]
-clap = { version = "4.0.24", features = ["derive"] }
+clap = { version = "~4.0", features = ["derive"] }
 pretty_assertions = "1.1.0"
--- a/rust/hg-core/examples/nodemap/index.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/examples/nodemap/index.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -29,7 +29,7 @@
 
 impl IndexEntry {
     fn parents(&self) -> [Revision; 2] {
-        [Revision::from_be(self.p1), Revision::from_be(self.p1)]
+        [self.p1, self.p2]
     }
 }
 
@@ -42,23 +42,18 @@
         if rev == NULL_REVISION {
             return None;
         }
-        let i = rev as usize;
-        if i >= self.len() {
-            None
-        } else {
-            Some(&self.data[i].node)
-        }
+        Some(&self.data[rev.0 as usize].node)
     }
 }
 
 impl Graph for &Index {
     fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
-        let [p1, p2] = (*self).data[rev as usize].parents();
+        let [p1, p2] = self.data[rev.0 as usize].parents();
         let len = (*self).len();
         if p1 < NULL_REVISION
             || p2 < NULL_REVISION
-            || p1 as usize >= len
-            || p2 as usize >= len
+            || p1.0 as usize >= len
+            || p2.0 as usize >= len
         {
             return Err(GraphError::ParentOutOfRange(rev));
         }
--- a/rust/hg-core/examples/nodemap/main.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/examples/nodemap/main.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -36,7 +36,7 @@
     let start = Instant::now();
     let mut nm = NodeTree::default();
     for rev in 0..index.len() {
-        let rev = rev as Revision;
+        let rev = Revision(rev as BaseRevision);
         nm.insert(index, index.node(rev).unwrap(), rev).unwrap();
     }
     eprintln!("Nodemap constructed in RAM in {:?}", start.elapsed());
@@ -55,7 +55,11 @@
     let len = index.len() as u32;
     let mut rng = rand::thread_rng();
     let nodes: Vec<Node> = (0..queries)
-        .map(|_| *index.node((rng.gen::<u32>() % len) as Revision).unwrap())
+        .map(|_| {
+            *index
+                .node(Revision((rng.gen::<u32>() % len) as BaseRevision))
+                .unwrap()
+        })
         .collect();
     if queries < 10 {
         let nodes_hex: Vec<String> =
--- a/rust/hg-core/src/ancestors.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/ancestors.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -247,7 +247,9 @@
                 revs.remove(&curr);
                 self.add_parents(curr)?;
             }
-            curr -= 1;
+            // We know this revision is safe because we've checked the bounds
+            // before.
+            curr = Revision(curr.0 - 1);
         }
         Ok(())
     }
@@ -297,14 +299,14 @@
 
         // TODO heuristics for with_capacity()?
         let mut missing: Vec<Revision> = Vec::new();
-        for curr in (0..=start).rev() {
+        for curr in (0..=start.0).rev() {
             if revs_visit.is_empty() {
                 break;
             }
-            if both_visit.remove(&curr) {
+            if both_visit.remove(&Revision(curr)) {
                 // curr's parents might have made it into revs_visit through
                 // another path
-                for p in self.graph.parents(curr)?.iter().cloned() {
+                for p in self.graph.parents(Revision(curr))?.iter().cloned() {
                     if p == NULL_REVISION {
                         continue;
                     }
@@ -312,9 +314,9 @@
                     bases_visit.insert(p);
                     both_visit.insert(p);
                 }
-            } else if revs_visit.remove(&curr) {
-                missing.push(curr);
-                for p in self.graph.parents(curr)?.iter().cloned() {
+            } else if revs_visit.remove(&Revision(curr)) {
+                missing.push(Revision(curr));
+                for p in self.graph.parents(Revision(curr))?.iter().cloned() {
                     if p == NULL_REVISION {
                         continue;
                     }
@@ -331,8 +333,8 @@
                         revs_visit.insert(p);
                     }
                 }
-            } else if bases_visit.contains(&curr) {
-                for p in self.graph.parents(curr)?.iter().cloned() {
+            } else if bases_visit.contains(&Revision(curr)) {
+                for p in self.graph.parents(Revision(curr))?.iter().cloned() {
                     if p == NULL_REVISION {
                         continue;
                     }
@@ -356,7 +358,41 @@
 mod tests {
 
     use super::*;
-    use crate::testing::{SampleGraph, VecGraph};
+    use crate::{
+        testing::{SampleGraph, VecGraph},
+        BaseRevision,
+    };
+
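+    // Test-only conveniences: these impls let plain integer literals stand
+    // in for `Revision` in the assertions below. The runtime `cfg!(test)`
+    // guard is a belt-and-braces check that they are never used outside
+    // of tests.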
+    impl From<BaseRevision> for Revision {
+        fn from(value: BaseRevision) -> Self {
+            if !cfg!(test) {
+                panic!("should only be used in tests")
+            }
+            Revision(value)
+        }
+    }
+
+    impl PartialEq<BaseRevision> for Revision {
+        fn eq(&self, other: &BaseRevision) -> bool {
+            if !cfg!(test) {
+                panic!("should only be used in tests")
+            }
+            self.0.eq(other)
+        }
+    }
+
+    impl PartialEq<u32> for Revision {
+        fn eq(&self, other: &u32) -> bool {
+            if !cfg!(test) {
+                panic!("should only be used in tests")
+            }
+            let check: Result<u32, _> = self.0.try_into();
+            match check {
+                Ok(value) => value.eq(other),
+                Err(_) => false,
+            }
+        }
+    }
 
     fn list_ancestors<G: Graph>(
         graph: G,
@@ -374,37 +410,80 @@
     /// Same tests as test-ancestor.py, without membership
     /// (see also test-ancestor.py.out)
     fn test_list_ancestor() {
-        assert_eq!(list_ancestors(SampleGraph, vec![], 0, false), vec![]);
+        assert_eq!(
+            list_ancestors(SampleGraph, vec![], 0.into(), false),
+            Vec::<Revision>::new()
+        );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![11, 13], 0, false),
+            list_ancestors(
+                SampleGraph,
+                vec![11.into(), 13.into()],
+                0.into(),
+                false
+            ),
             vec![8, 7, 4, 3, 2, 1, 0]
         );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![1, 3], 0, false),
+            list_ancestors(
+                SampleGraph,
+                vec![1.into(), 3.into()],
+                0.into(),
+                false
+            ),
             vec![1, 0]
         );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![11, 13], 0, true),
+            list_ancestors(
+                SampleGraph,
+                vec![11.into(), 13.into()],
+                0.into(),
+                true
+            ),
             vec![13, 11, 8, 7, 4, 3, 2, 1, 0]
         );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![11, 13], 6, false),
+            list_ancestors(
+                SampleGraph,
+                vec![11.into(), 13.into()],
+                6.into(),
+                false
+            ),
             vec![8, 7]
         );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![11, 13], 6, true),
+            list_ancestors(
+                SampleGraph,
+                vec![11.into(), 13.into()],
+                6.into(),
+                true
+            ),
             vec![13, 11, 8, 7]
         );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![11, 13], 11, true),
+            list_ancestors(
+                SampleGraph,
+                vec![11.into(), 13.into()],
+                11.into(),
+                true
+            ),
             vec![13, 11]
         );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![11, 13], 12, true),
+            list_ancestors(
+                SampleGraph,
+                vec![11.into(), 13.into()],
+                12.into(),
+                true
+            ),
             vec![13]
         );
         assert_eq!(
-            list_ancestors(SampleGraph, vec![10, 1], 0, true),
+            list_ancestors(
+                SampleGraph,
+                vec![10.into(), 1.into()],
+                0.into(),
+                true
+            ),
             vec![10, 5, 4, 2, 1, 0]
         );
     }
@@ -415,33 +494,53 @@
     /// suite.
     /// For instance, run tests/test-obsolete-checkheads.t
     fn test_nullrev_input() {
-        let mut iter =
-            AncestorsIterator::new(SampleGraph, vec![-1], 0, false).unwrap();
+        let mut iter = AncestorsIterator::new(
+            SampleGraph,
+            vec![Revision(-1)],
+            0.into(),
+            false,
+        )
+        .unwrap();
         assert_eq!(iter.next(), None)
     }
 
     #[test]
     fn test_contains() {
-        let mut lazy =
-            AncestorsIterator::new(SampleGraph, vec![10, 1], 0, true).unwrap();
-        assert!(lazy.contains(1).unwrap());
-        assert!(!lazy.contains(3).unwrap());
+        let mut lazy = AncestorsIterator::new(
+            SampleGraph,
+            vec![10.into(), 1.into()],
+            0.into(),
+            true,
+        )
+        .unwrap();
+        assert!(lazy.contains(1.into()).unwrap());
+        assert!(!lazy.contains(3.into()).unwrap());
 
-        let mut lazy =
-            AncestorsIterator::new(SampleGraph, vec![0], 0, false).unwrap();
+        let mut lazy = AncestorsIterator::new(
+            SampleGraph,
+            vec![0.into()],
+            0.into(),
+            false,
+        )
+        .unwrap();
         assert!(!lazy.contains(NULL_REVISION).unwrap());
     }
 
     #[test]
     fn test_peek() {
-        let mut iter =
-            AncestorsIterator::new(SampleGraph, vec![10], 0, true).unwrap();
+        let mut iter = AncestorsIterator::new(
+            SampleGraph,
+            vec![10.into()],
+            0.into(),
+            true,
+        )
+        .unwrap();
         // peek() gives us the next value
-        assert_eq!(iter.peek(), Some(10));
+        assert_eq!(iter.peek(), Some(10.into()));
         // but it's not been consumed
-        assert_eq!(iter.next(), Some(Ok(10)));
+        assert_eq!(iter.next(), Some(Ok(10.into())));
         // and iteration resumes normally
-        assert_eq!(iter.next(), Some(Ok(5)));
+        assert_eq!(iter.next(), Some(Ok(5.into())));
 
         // let's drain the iterator to test peek() at the end
         while iter.next().is_some() {}
@@ -450,19 +549,29 @@
 
     #[test]
     fn test_empty() {
-        let mut iter =
-            AncestorsIterator::new(SampleGraph, vec![10], 0, true).unwrap();
+        let mut iter = AncestorsIterator::new(
+            SampleGraph,
+            vec![10.into()],
+            0.into(),
+            true,
+        )
+        .unwrap();
         assert!(!iter.is_empty());
         while iter.next().is_some() {}
         assert!(!iter.is_empty());
 
-        let iter =
-            AncestorsIterator::new(SampleGraph, vec![], 0, true).unwrap();
+        let iter = AncestorsIterator::new(SampleGraph, vec![], 0.into(), true)
+            .unwrap();
         assert!(iter.is_empty());
 
         // case where iter.seen == {NULL_REVISION}
-        let iter =
-            AncestorsIterator::new(SampleGraph, vec![0], 0, false).unwrap();
+        let iter = AncestorsIterator::new(
+            SampleGraph,
+            vec![0.into()],
+            0.into(),
+            false,
+        )
+        .unwrap();
         assert!(iter.is_empty());
     }
 
@@ -471,9 +580,11 @@
     struct Corrupted;
 
     impl Graph for Corrupted {
+        // FIXME what to do about this? Are we just not supposed to get them
+        // anymore?
         fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
             match rev {
-                1 => Ok([0, -1]),
+                Revision(1) => Ok([0.into(), (-1).into()]),
                 r => Err(GraphError::ParentOutOfRange(r)),
             }
         }
@@ -482,9 +593,14 @@
     #[test]
     fn test_initrev_out_of_range() {
         // inclusive=false looks up initrev's parents right away
-        match AncestorsIterator::new(SampleGraph, vec![25], 0, false) {
+        match AncestorsIterator::new(
+            SampleGraph,
+            vec![25.into()],
+            0.into(),
+            false,
+        ) {
             Ok(_) => panic!("Should have been ParentOutOfRange"),
-            Err(e) => assert_eq!(e, GraphError::ParentOutOfRange(25)),
+            Err(e) => assert_eq!(e, GraphError::ParentOutOfRange(25.into())),
         }
     }
 
@@ -492,22 +608,29 @@
     fn test_next_out_of_range() {
         // inclusive=false looks up initrev's parents right away
         let mut iter =
-            AncestorsIterator::new(Corrupted, vec![1], 0, false).unwrap();
-        assert_eq!(iter.next(), Some(Err(GraphError::ParentOutOfRange(0))));
+            AncestorsIterator::new(Corrupted, vec![1.into()], 0.into(), false)
+                .unwrap();
+        assert_eq!(
+            iter.next(),
+            Some(Err(GraphError::ParentOutOfRange(0.into())))
+        );
     }
 
     #[test]
     /// Test constructor, add/get bases and heads
     fn test_missing_bases() -> Result<(), GraphError> {
-        let mut missing_ancestors =
-            MissingAncestors::new(SampleGraph, [5, 3, 1, 3].iter().cloned());
+        let mut missing_ancestors = MissingAncestors::new(
+            SampleGraph,
+            [5.into(), 3.into(), 1.into(), 3.into()].iter().cloned(),
+        );
         let mut as_vec: Vec<Revision> =
             missing_ancestors.get_bases().iter().cloned().collect();
         as_vec.sort_unstable();
         assert_eq!(as_vec, [1, 3, 5]);
         assert_eq!(missing_ancestors.max_base, 5);
 
-        missing_ancestors.add_bases([3, 7, 8].iter().cloned());
+        missing_ancestors
+            .add_bases([3.into(), 7.into(), 8.into()].iter().cloned());
         as_vec = missing_ancestors.get_bases().iter().cloned().collect();
         as_vec.sort_unstable();
         assert_eq!(as_vec, [1, 3, 5, 7, 8]);
@@ -520,13 +643,16 @@
     }
 
     fn assert_missing_remove(
-        bases: &[Revision],
-        revs: &[Revision],
-        expected: &[Revision],
+        bases: &[BaseRevision],
+        revs: &[BaseRevision],
+        expected: &[BaseRevision],
     ) {
-        let mut missing_ancestors =
-            MissingAncestors::new(SampleGraph, bases.iter().cloned());
-        let mut revset: HashSet<Revision> = revs.iter().cloned().collect();
+        let mut missing_ancestors = MissingAncestors::new(
+            SampleGraph,
+            bases.iter().map(|r| Revision(*r)),
+        );
+        let mut revset: HashSet<Revision> =
+            revs.iter().map(|r| Revision(*r)).collect();
         missing_ancestors
             .remove_ancestors_from(&mut revset)
             .unwrap();
@@ -547,14 +673,16 @@
     }
 
     fn assert_missing_ancestors(
-        bases: &[Revision],
-        revs: &[Revision],
-        expected: &[Revision],
+        bases: &[BaseRevision],
+        revs: &[BaseRevision],
+        expected: &[BaseRevision],
     ) {
-        let mut missing_ancestors =
-            MissingAncestors::new(SampleGraph, bases.iter().cloned());
+        let mut missing_ancestors = MissingAncestors::new(
+            SampleGraph,
+            bases.iter().map(|r| Revision(*r)),
+        );
         let missing = missing_ancestors
-            .missing_ancestors(revs.iter().cloned())
+            .missing_ancestors(revs.iter().map(|r| Revision(*r)))
             .unwrap();
         assert_eq!(missing.as_slice(), expected);
     }
@@ -575,110 +703,115 @@
     #[allow(clippy::unnecessary_cast)]
     #[test]
     fn test_remove_ancestors_from_case1() {
+        const FAKE_NULL_REVISION: BaseRevision = -1;
+        assert_eq!(FAKE_NULL_REVISION, NULL_REVISION.0);
         let graph: VecGraph = vec![
-            [NULL_REVISION, NULL_REVISION],
-            [0, NULL_REVISION],
+            [FAKE_NULL_REVISION, FAKE_NULL_REVISION],
+            [0, FAKE_NULL_REVISION],
             [1, 0],
             [2, 1],
-            [3, NULL_REVISION],
-            [4, NULL_REVISION],
+            [3, FAKE_NULL_REVISION],
+            [4, FAKE_NULL_REVISION],
             [5, 1],
-            [2, NULL_REVISION],
-            [7, NULL_REVISION],
-            [8, NULL_REVISION],
-            [9, NULL_REVISION],
+            [2, FAKE_NULL_REVISION],
+            [7, FAKE_NULL_REVISION],
+            [8, FAKE_NULL_REVISION],
+            [9, FAKE_NULL_REVISION],
             [10, 1],
-            [3, NULL_REVISION],
-            [12, NULL_REVISION],
-            [13, NULL_REVISION],
-            [14, NULL_REVISION],
-            [4, NULL_REVISION],
-            [16, NULL_REVISION],
-            [17, NULL_REVISION],
-            [18, NULL_REVISION],
+            [3, FAKE_NULL_REVISION],
+            [12, FAKE_NULL_REVISION],
+            [13, FAKE_NULL_REVISION],
+            [14, FAKE_NULL_REVISION],
+            [4, FAKE_NULL_REVISION],
+            [16, FAKE_NULL_REVISION],
+            [17, FAKE_NULL_REVISION],
+            [18, FAKE_NULL_REVISION],
             [19, 11],
-            [20, NULL_REVISION],
-            [21, NULL_REVISION],
-            [22, NULL_REVISION],
-            [23, NULL_REVISION],
-            [2, NULL_REVISION],
-            [3, NULL_REVISION],
+            [20, FAKE_NULL_REVISION],
+            [21, FAKE_NULL_REVISION],
+            [22, FAKE_NULL_REVISION],
+            [23, FAKE_NULL_REVISION],
+            [2, FAKE_NULL_REVISION],
+            [3, FAKE_NULL_REVISION],
             [26, 24],
-            [27, NULL_REVISION],
-            [28, NULL_REVISION],
-            [12, NULL_REVISION],
-            [1, NULL_REVISION],
+            [27, FAKE_NULL_REVISION],
+            [28, FAKE_NULL_REVISION],
+            [12, FAKE_NULL_REVISION],
+            [1, FAKE_NULL_REVISION],
             [1, 9],
-            [32, NULL_REVISION],
-            [33, NULL_REVISION],
+            [32, FAKE_NULL_REVISION],
+            [33, FAKE_NULL_REVISION],
             [34, 31],
-            [35, NULL_REVISION],
+            [35, FAKE_NULL_REVISION],
             [36, 26],
-            [37, NULL_REVISION],
-            [38, NULL_REVISION],
-            [39, NULL_REVISION],
-            [40, NULL_REVISION],
-            [41, NULL_REVISION],
+            [37, FAKE_NULL_REVISION],
+            [38, FAKE_NULL_REVISION],
+            [39, FAKE_NULL_REVISION],
+            [40, FAKE_NULL_REVISION],
+            [41, FAKE_NULL_REVISION],
             [42, 26],
-            [0, NULL_REVISION],
-            [44, NULL_REVISION],
+            [0, FAKE_NULL_REVISION],
+            [44, FAKE_NULL_REVISION],
             [45, 4],
-            [40, NULL_REVISION],
-            [47, NULL_REVISION],
+            [40, FAKE_NULL_REVISION],
+            [47, FAKE_NULL_REVISION],
             [36, 0],
-            [49, NULL_REVISION],
-            [NULL_REVISION, NULL_REVISION],
-            [51, NULL_REVISION],
-            [52, NULL_REVISION],
-            [53, NULL_REVISION],
-            [14, NULL_REVISION],
-            [55, NULL_REVISION],
-            [15, NULL_REVISION],
-            [23, NULL_REVISION],
-            [58, NULL_REVISION],
-            [59, NULL_REVISION],
-            [2, NULL_REVISION],
+            [49, FAKE_NULL_REVISION],
+            [FAKE_NULL_REVISION, FAKE_NULL_REVISION],
+            [51, FAKE_NULL_REVISION],
+            [52, FAKE_NULL_REVISION],
+            [53, FAKE_NULL_REVISION],
+            [14, FAKE_NULL_REVISION],
+            [55, FAKE_NULL_REVISION],
+            [15, FAKE_NULL_REVISION],
+            [23, FAKE_NULL_REVISION],
+            [58, FAKE_NULL_REVISION],
+            [59, FAKE_NULL_REVISION],
+            [2, FAKE_NULL_REVISION],
             [61, 59],
-            [62, NULL_REVISION],
-            [63, NULL_REVISION],
-            [NULL_REVISION, NULL_REVISION],
-            [65, NULL_REVISION],
-            [66, NULL_REVISION],
-            [67, NULL_REVISION],
-            [68, NULL_REVISION],
+            [62, FAKE_NULL_REVISION],
+            [63, FAKE_NULL_REVISION],
+            [FAKE_NULL_REVISION, FAKE_NULL_REVISION],
+            [65, FAKE_NULL_REVISION],
+            [66, FAKE_NULL_REVISION],
+            [67, FAKE_NULL_REVISION],
+            [68, FAKE_NULL_REVISION],
             [37, 28],
             [69, 25],
-            [71, NULL_REVISION],
-            [72, NULL_REVISION],
+            [71, FAKE_NULL_REVISION],
+            [72, FAKE_NULL_REVISION],
             [50, 2],
-            [74, NULL_REVISION],
-            [12, NULL_REVISION],
-            [18, NULL_REVISION],
-            [77, NULL_REVISION],
-            [78, NULL_REVISION],
-            [79, NULL_REVISION],
+            [74, FAKE_NULL_REVISION],
+            [12, FAKE_NULL_REVISION],
+            [18, FAKE_NULL_REVISION],
+            [77, FAKE_NULL_REVISION],
+            [78, FAKE_NULL_REVISION],
+            [79, FAKE_NULL_REVISION],
             [43, 33],
-            [81, NULL_REVISION],
-            [82, NULL_REVISION],
-            [83, NULL_REVISION],
+            [81, FAKE_NULL_REVISION],
+            [82, FAKE_NULL_REVISION],
+            [83, FAKE_NULL_REVISION],
             [84, 45],
-            [85, NULL_REVISION],
-            [86, NULL_REVISION],
-            [NULL_REVISION, NULL_REVISION],
-            [88, NULL_REVISION],
-            [NULL_REVISION, NULL_REVISION],
+            [85, FAKE_NULL_REVISION],
+            [86, FAKE_NULL_REVISION],
+            [FAKE_NULL_REVISION, FAKE_NULL_REVISION],
+            [88, FAKE_NULL_REVISION],
+            [FAKE_NULL_REVISION, FAKE_NULL_REVISION],
             [76, 83],
-            [44, NULL_REVISION],
-            [92, NULL_REVISION],
-            [93, NULL_REVISION],
-            [9, NULL_REVISION],
+            [44, FAKE_NULL_REVISION],
+            [92, FAKE_NULL_REVISION],
+            [93, FAKE_NULL_REVISION],
+            [9, FAKE_NULL_REVISION],
             [95, 67],
-            [96, NULL_REVISION],
-            [97, NULL_REVISION],
-            [NULL_REVISION, NULL_REVISION],
-        ];
-        let problem_rev = 28 as Revision;
-        let problem_base = 70 as Revision;
+            [96, FAKE_NULL_REVISION],
+            [97, FAKE_NULL_REVISION],
+            [FAKE_NULL_REVISION, FAKE_NULL_REVISION],
+        ]
+        .into_iter()
+        .map(|[a, b]| [Revision(a), Revision(b)])
+        .collect();
+        let problem_rev = 28.into();
+        let problem_base = 70.into();
         // making the problem obvious: problem_rev is a parent of problem_base
         assert_eq!(graph.parents(problem_base).unwrap()[1], problem_rev);
 
@@ -687,14 +820,14 @@
                 graph,
                 [60, 26, 70, 3, 96, 19, 98, 49, 97, 47, 1, 6]
                     .iter()
-                    .cloned(),
+                    .map(|r| Revision(*r)),
             );
         assert!(missing_ancestors.bases.contains(&problem_base));
 
         let mut revs: HashSet<Revision> =
             [4, 12, 41, 28, 68, 38, 1, 30, 56, 44]
                 .iter()
-                .cloned()
+                .map(|r| Revision(*r))
                 .collect();
         missing_ancestors.remove_ancestors_from(&mut revs).unwrap();
         assert!(!revs.contains(&problem_rev));
--- a/rust/hg-core/src/config/config.rs	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/config/config_items.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,725 @@
+//! Code for parsing default Mercurial config items.
+use itertools::Itertools;
+use serde::Deserialize;
+
+use crate::{errors::HgError, exit_codes, FastHashMap};
+
+/// Corresponds to the structure of `mercurial/configitems.toml`.
+#[derive(Debug, Deserialize)]
+pub struct ConfigItems {
+    items: Vec<DefaultConfigItem>,
+    templates: FastHashMap<String, Vec<TemplateItem>>,
+    #[serde(rename = "template-applications")]
+    template_applications: Vec<TemplateApplication>,
+}
+
+/// Corresponds to a config item declaration in `mercurial/configitems.toml`.
+#[derive(Clone, Debug, PartialEq, Deserialize)]
+#[serde(try_from = "RawDefaultConfigItem")]
+pub struct DefaultConfigItem {
+    /// Section of the config the item is in (e.g. `[merge-tools]`)
+    section: String,
+    /// Name of the item (e.g. `meld.gui`)
+    name: String,
+    /// Default value (can be dynamic, see [`DefaultConfigItemType`])
+    default: Option<DefaultConfigItemType>,
+    /// If the config option is generic (e.g. `merge-tools.*`), defines
+    /// the priority of this item relative to other generic items.
+    /// When looking up an item, all generic items within the same section
+    /// are sorted by order of priority, and the first one whose `name`
+    /// regex matches the requested item is returned.
+    #[serde(default)]
+    priority: Option<isize>,
+    /// Aliases, if any. Each alias is a tuple of `(section, name)` for each
+    /// option that is aliased to this one.
+    #[serde(default)]
+    alias: Vec<(String, String)>,
+    /// Whether the config item is marked as experimental
+    #[serde(default)]
+    experimental: bool,
+    /// The (possibly empty) docstring for the item
+    #[serde(default)]
+    documentation: String,
+    /// Whether the item is part of an in-core extension. This allows us to
+    /// hide them if the extension is not enabled, to preserve legacy
+    /// behavior.
+    #[serde(default)]
+    in_core_extension: Option<String>,
+}
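+
+// Illustrative sketch only (values are hypothetical): a generic item as it
+// might be declared in `mercurial/configitems.toml`. `generic = true`
+// together with `priority = -1` deserializes into `priority: Some(-1)` on
+// the struct above:
+//
+//     [[items]]
+//     section = "merge-tools"
+//     name = '.*\.gui$'
+//     default = false
+//     generic = true
+//     priority = -1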
+
+/// Corresponds to the raw (i.e. on disk) structure of config items. Used as
+/// an intermediate step in deserialization.
+#[derive(Clone, Debug, Deserialize)]
+struct RawDefaultConfigItem {
+    section: String,
+    name: String,
+    default: Option<toml::Value>,
+    #[serde(rename = "default-type")]
+    default_type: Option<String>,
+    #[serde(default)]
+    priority: isize,
+    #[serde(default)]
+    generic: bool,
+    #[serde(default)]
+    alias: Vec<(String, String)>,
+    #[serde(default)]
+    experimental: bool,
+    #[serde(default)]
+    documentation: String,
+    #[serde(default)]
+    in_core_extension: Option<String>,
+}
+
+impl TryFrom<RawDefaultConfigItem> for DefaultConfigItem {
+    type Error = HgError;
+
+    fn try_from(value: RawDefaultConfigItem) -> Result<Self, Self::Error> {
+        Ok(Self {
+            section: value.section,
+            name: value.name,
+            default: raw_default_to_concrete(
+                value.default_type,
+                value.default,
+            )?,
+            priority: if value.generic {
+                Some(value.priority)
+            } else {
+                None
+            },
+            alias: value.alias,
+            experimental: value.experimental,
+            documentation: value.documentation,
+            in_core_extension: value.in_core_extension,
+        })
+    }
+}
+
+impl DefaultConfigItem {
+    fn is_generic(&self) -> bool {
+        self.priority.is_some()
+    }
+
+    pub fn in_core_extension(&self) -> Option<&str> {
+        self.in_core_extension.as_deref()
+    }
+
+    pub fn section(&self) -> &str {
+        self.section.as_ref()
+    }
+}
+
+impl<'a> TryFrom<&'a DefaultConfigItem> for Option<&'a str> {
+    type Error = HgError;
+
+    fn try_from(
+        value: &'a DefaultConfigItem,
+    ) -> Result<Option<&'a str>, Self::Error> {
+        match &value.default {
+            Some(default) => {
+                let err = HgError::abort(
+                    format!(
+                        "programming error: wrong query on config item '{}.{}'",
+                        value.section,
+                        value.name
+                    ),
+                    exit_codes::ABORT,
+                    Some(format!(
+                        "asked for '&str', type of default is '{}'",
+                        default.type_str()
+                    )),
+                );
+                match default {
+                    DefaultConfigItemType::Primitive(toml::Value::String(
+                        s,
+                    )) => Ok(Some(s)),
+                    _ => Err(err),
+                }
+            }
+            None => Ok(None),
+        }
+    }
+}
+
+impl<'a> TryFrom<&'a DefaultConfigItem> for Option<&'a [u8]> {
+    type Error = HgError;
+
+    fn try_from(
+        value: &'a DefaultConfigItem,
+    ) -> Result<Option<&'a [u8]>, Self::Error> {
+        match &value.default {
+            Some(default) => {
+                let err = HgError::abort(
+                    format!(
+                        "programming error: wrong query on config item '{}.{}'",
+                        value.section,
+                        value.name
+                    ),
+                    exit_codes::ABORT,
+                    Some(format!(
+                        "asked for bytes, type of default is '{}', \
+                        which cannot be interpreted as bytes",
+                        default.type_str()
+                    )),
+                );
+                match default {
+                    DefaultConfigItemType::Primitive(p) => {
+                        Ok(p.as_str().map(str::as_bytes))
+                    }
+                    _ => Err(err),
+                }
+            }
+            None => Ok(None),
+        }
+    }
+}
+
+impl TryFrom<&DefaultConfigItem> for Option<bool> {
+    type Error = HgError;
+
+    fn try_from(value: &DefaultConfigItem) -> Result<Self, Self::Error> {
+        match &value.default {
+            Some(default) => {
+                let err = HgError::abort(
+                    format!(
+                        "programming error: wrong query on config item '{}.{}'",
+                        value.section,
+                        value.name
+                    ),
+                    exit_codes::ABORT,
+                    Some(format!(
+                        "asked for 'bool', type of default is '{}'",
+                        default.type_str()
+                    )),
+                );
+                match default {
+                    DefaultConfigItemType::Primitive(
+                        toml::Value::Boolean(b),
+                    ) => Ok(Some(*b)),
+                    _ => Err(err),
+                }
+            }
+            None => Ok(None),
+        }
+    }
+}
+
+impl TryFrom<&DefaultConfigItem> for Option<u32> {
+    type Error = HgError;
+
+    fn try_from(value: &DefaultConfigItem) -> Result<Self, Self::Error> {
+        match &value.default {
+            Some(default) => {
+                let err = HgError::abort(
+                    format!(
+                        "programming error: wrong query on config item '{}.{}'",
+                        value.section,
+                        value.name
+                    ),
+                    exit_codes::ABORT,
+                    Some(format!(
+                        "asked for 'u32', type of default is '{}'",
+                        default.type_str()
+                    )),
+                );
+                match default {
+                    DefaultConfigItemType::Primitive(
+                        toml::Value::Integer(b),
+                    ) => {
+                        Ok(Some((*b).try_into().expect("TOML integer to u32")))
+                    }
+                    _ => Err(err),
+                }
+            }
+            None => Ok(None),
+        }
+    }
+}
+
+impl TryFrom<&DefaultConfigItem> for Option<u64> {
+    type Error = HgError;
+
+    fn try_from(value: &DefaultConfigItem) -> Result<Self, Self::Error> {
+        match &value.default {
+            Some(default) => {
+                let err = HgError::abort(
+                    format!(
+                        "programming error: wrong query on config item '{}.{}'",
+                        value.section,
+                        value.name
+                    ),
+                    exit_codes::ABORT,
+                    Some(format!(
+                        "asked for 'u64', type of default is '{}'",
+                        default.type_str()
+                    )),
+                );
+                match default {
+                    DefaultConfigItemType::Primitive(
+                        toml::Value::Integer(b),
+                    ) => {
+                        Ok(Some((*b).try_into().expect("TOML integer to u64")))
+                    }
+                    _ => Err(err),
+                }
+            }
+            None => Ok(None),
+        }
+    }
+}
+
+/// Allows abstracting over more complex default values than just primitives.
+/// The former `configitems.py` contained some dynamic code that is encoded
+/// in this enum.
+#[derive(Debug, PartialEq, Clone, Deserialize)]
+pub enum DefaultConfigItemType {
+    /// Some primitive type (string, integer, boolean)
+    Primitive(toml::Value),
+    /// A dynamic value that will be given by the code at runtime
+    Dynamic,
+    /// A lazily-returned array (possibly only relevant in the Python impl)
+    /// Example: `lambda: [b"zstd", b"zlib"]`
+    Lambda(Vec<String>),
+    /// For now, a special case for `web.encoding` that points to the
+    /// `encoding.encoding` module in the Python impl so that local encoding
+    /// is correctly resolved at runtime
+    LazyModule(String),
+    ListType,
+}
+
+impl DefaultConfigItemType {
+    pub fn type_str(&self) -> &str {
+        match self {
+            DefaultConfigItemType::Primitive(primitive) => {
+                primitive.type_str()
+            }
+            DefaultConfigItemType::Dynamic => "dynamic",
+            DefaultConfigItemType::Lambda(_) => "lambda",
+            DefaultConfigItemType::LazyModule(_) => "lazy_module",
+            DefaultConfigItemType::ListType => "list_type",
+        }
+    }
+}
+
+/// Most of the fields are shared with [`DefaultConfigItem`].
+#[derive(Debug, Clone, Deserialize)]
+#[serde(try_from = "RawTemplateItem")]
+struct TemplateItem {
+    suffix: String,
+    default: Option<DefaultConfigItemType>,
+    priority: Option<isize>,
+    #[serde(default)]
+    alias: Vec<(String, String)>,
+    #[serde(default)]
+    experimental: bool,
+    #[serde(default)]
+    documentation: String,
+}
+
+/// Corresponds to the raw (i.e. on disk) representation of a template item.
+/// Used as an intermediate step in deserialization.
+#[derive(Clone, Debug, Deserialize)]
+struct RawTemplateItem {
+    suffix: String,
+    default: Option<toml::Value>,
+    #[serde(rename = "default-type")]
+    default_type: Option<String>,
+    #[serde(default)]
+    priority: isize,
+    #[serde(default)]
+    generic: bool,
+    #[serde(default)]
+    alias: Vec<(String, String)>,
+    #[serde(default)]
+    experimental: bool,
+    #[serde(default)]
+    documentation: String,
+}
+
+impl TemplateItem {
+    fn into_default_item(
+        self,
+        application: TemplateApplication,
+    ) -> DefaultConfigItem {
+        DefaultConfigItem {
+            section: application.section,
+            name: application
+                .prefix
+                .map(|prefix| format!("{}.{}", prefix, self.suffix))
+                .unwrap_or(self.suffix),
+            default: self.default,
+            priority: self.priority,
+            alias: self.alias,
+            experimental: self.experimental,
+            documentation: self.documentation,
+            in_core_extension: None,
+        }
+    }
+}
+
+impl TryFrom<RawTemplateItem> for TemplateItem {
+    type Error = HgError;
+
+    fn try_from(value: RawTemplateItem) -> Result<Self, Self::Error> {
+        Ok(Self {
+            suffix: value.suffix,
+            default: raw_default_to_concrete(
+                value.default_type,
+                value.default,
+            )?,
+            priority: if value.generic {
+                Some(value.priority)
+            } else {
+                None
+            },
+            alias: value.alias,
+            experimental: value.experimental,
+            documentation: value.documentation,
+        })
+    }
+}
+
+/// Transforms the on-disk string-based representation of complex default types
+/// to the concrete [`DefaultConfigItemType`].
+fn raw_default_to_concrete(
+    default_type: Option<String>,
+    default: Option<toml::Value>,
+) -> Result<Option<DefaultConfigItemType>, HgError> {
+    Ok(match default_type.as_deref() {
+        None => default.as_ref().map(|default| {
+            DefaultConfigItemType::Primitive(default.to_owned())
+        }),
+        Some("dynamic") => Some(DefaultConfigItemType::Dynamic),
+        Some("list_type") => Some(DefaultConfigItemType::ListType),
+        Some("lambda") => match &default {
+            Some(default) => Some(DefaultConfigItemType::Lambda(
+                default.to_owned().try_into().map_err(|e| {
+                    HgError::abort(
+                        e.to_string(),
+                        exit_codes::ABORT,
+                        Some("Check 'mercurial/configitems.toml'".into()),
+                    )
+                })?,
+            )),
+            None => {
+                return Err(HgError::abort(
+                    "lambda defined with no return value".to_string(),
+                    exit_codes::ABORT,
+                    Some("Check 'mercurial/configitems.toml'".into()),
+                ))
+            }
+        },
+        Some("lazy_module") => match &default {
+            Some(default) => {
+                Some(DefaultConfigItemType::LazyModule(match default {
+                    toml::Value::String(module) => module.to_owned(),
+                    _ => {
+                        return Err(HgError::abort(
+                            "lazy_module module name should be a string"
+                                .to_string(),
+                            exit_codes::ABORT,
+                            Some("Check 'mercurial/configitems.toml'".into()),
+                        ))
+                    }
+                }))
+            }
+            None => {
+                return Err(HgError::abort(
+                    "lazy_module should have a default value".to_string(),
+                    exit_codes::ABORT,
+                    Some("Check 'mercurial/configitems.toml'".into()),
+                ))
+            }
+        },
+        Some(invalid) => {
+            return Err(HgError::abort(
+                format!("invalid default_type '{}'", invalid),
+                exit_codes::ABORT,
+                Some("Check 'mercurial/configitems.toml'".into()),
+            ))
+        }
+    })
+}
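+
+// A sketch of the mapping above, with illustrative values:
+//
+//   (no default-type), default = 3600               -> Primitive(Integer(3600))
+//   default-type = "dynamic"                        -> Dynamic
+//   default-type = "list_type"                      -> ListType
+//   default-type = "lambda", default = ["a", "b"]   -> Lambda(vec!["a", "b"])
+//   default-type = "lazy_module", default = "mod"   -> LazyModule("mod")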
+
+#[derive(Debug, Clone, Deserialize)]
+struct TemplateApplication {
+    template: String,
+    section: String,
+    #[serde(default)]
+    prefix: Option<String>,
+}
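+
+// Illustrative example, taken from the shape exercised in the tests below:
+// applying template "diff-options" to section "diff" (no prefix) turns a
+// template item with suffix "nodates" into the default item `diff.nodates`;
+// with section "commands" and prefix "revert.interactive" it becomes
+// `commands.revert.interactive.nodates`.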
+
+/// Represents the (dynamic) set of default core Mercurial config items from
+/// `mercurial/configitems.toml`.
+#[derive(Clone, Debug, Default)]
+pub struct DefaultConfig {
+    /// Mapping of section -> (mapping of name -> item)
+    items: FastHashMap<String, FastHashMap<String, DefaultConfigItem>>,
+}
+
+impl DefaultConfig {
+    pub fn empty() -> DefaultConfig {
+        Self {
+            items: Default::default(),
+        }
+    }
+
+    /// Returns `Self`, given the contents of `mercurial/configitems.toml`
+    #[logging_timer::time("trace")]
+    pub fn from_contents(contents: &str) -> Result<Self, HgError> {
+        let mut from_file: ConfigItems =
+            toml::from_str(contents).map_err(|e| {
+                HgError::abort(
+                    e.to_string(),
+                    exit_codes::ABORT,
+                    Some("Check 'mercurial/configitems.toml'".into()),
+                )
+            })?;
+
+        let mut flat_items = from_file.items;
+
+        for application in from_file.template_applications.drain(..) {
+            match from_file.templates.get(&application.template) {
+                None => return Err(
+                    HgError::abort(
+                        format!(
+                            "template application refers to undefined template '{}'",
+                            application.template
+                        ),
+                        exit_codes::ABORT,
+                        Some("Check 'mercurial/configitems.toml'".into())
+                    )
+                ),
+                Some(template_items) => {
+                    for template_item in template_items {
+                        flat_items.push(
+                            template_item
+                                .clone()
+                                .into_default_item(application.clone()),
+                        )
+                    }
+                }
+            };
+        }
+
+        let items = flat_items.into_iter().fold(
+            FastHashMap::default(),
+            |mut acc, item| {
+                acc.entry(item.section.to_owned())
+                    .or_default()
+                    .insert(item.name.to_owned(), item);
+                acc
+            },
+        );
+
+        Ok(Self { items })
+    }
+
+    /// Return the default config item that matches `section` and `item`.
+    pub fn get(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Option<&DefaultConfigItem> {
+        // Core items must be valid UTF-8
+        let section = String::from_utf8_lossy(section);
+        let section_map = self.items.get(section.as_ref())?;
+        let item_name_lossy = String::from_utf8_lossy(item);
+        match section_map.get(item_name_lossy.as_ref()) {
+            Some(item) => Some(item),
+            None => {
+                for generic_item in section_map
+                    .values()
+                    .filter(|item| item.is_generic())
+                    .sorted_by_key(|item| match item.priority {
+                        Some(priority) => (priority, &item.name),
+                        _ => unreachable!(),
+                    })
+                {
+                    if regex::bytes::Regex::new(&generic_item.name)
+                        .expect("invalid regex in configitems")
+                        .is_match(item)
+                    {
+                        return Some(generic_item);
+                    }
+                }
+                None
+            }
+        }
+    }
+}
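+
+// Minimal usage sketch (the `contents` string is assumed to hold valid TOML,
+// as in the test below):
+//
+//     let config = DefaultConfig::from_contents(contents)?;
+//     if let Some(item) = config.get(b"censor", b"policy") {
+//         assert_eq!(item.section(), "censor");
+//     }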
+
+#[cfg(test)]
+mod tests {
+    use crate::config::config_items::{
+        DefaultConfigItem, DefaultConfigItemType,
+    };
+
+    use super::DefaultConfig;
+
+    #[test]
+    fn test_config_read() {
+        let contents = r#"
+[[items]]
+section = "alias"
+name = "abcd.*"
+default = 3
+generic = true
+priority = -1
+
+[[items]]
+section = "alias"
+name = ".*"
+default-type = "dynamic"
+generic = true
+
+[[items]]
+section = "cmdserver"
+name = "track-log"
+default-type = "lambda"
+default = [ "chgserver", "cmdserver", "repocache",]
+
+[[items]]
+section = "chgserver"
+name = "idletimeout"
+default = 3600
+
+[[items]]
+section = "cmdserver"
+name = "message-encodings"
+default-type = "list_type"
+
+[[items]]
+section = "web"
+name = "encoding"
+default-type = "lazy_module"
+default = "encoding.encoding"
+
+[[items]]
+section = "command-templates"
+name = "graphnode"
+alias = [["ui", "graphnodetemplate"]]
+documentation = """This is a docstring.
+This is another line \
+but this is not."""
+
+[[items]]
+section = "censor"
+name = "policy"
+default = "abort"
+experimental = true
+
+[[template-applications]]
+template = "diff-options"
+section = "commands"
+prefix = "revert.interactive"
+
+[[template-applications]]
+template = "diff-options"
+section = "diff"
+
+[templates]
+[[templates.diff-options]]
+suffix = "nodates"
+default = false
+
+[[templates.diff-options]]
+suffix = "showfunc"
+default = false
+
+[[templates.diff-options]]
+suffix = "unified"
+"#;
+        let res = DefaultConfig::from_contents(contents);
+        let config = match res {
+            Ok(config) => config,
+            Err(e) => panic!("{}", e),
+        };
+        let expected = DefaultConfigItem {
+            section: "censor".into(),
+            name: "policy".into(),
+            default: Some(DefaultConfigItemType::Primitive("abort".into())),
+            priority: None,
+            alias: vec![],
+            experimental: true,
+            documentation: "".into(),
+            in_core_extension: None,
+        };
+        assert_eq!(config.get(b"censor", b"policy"), Some(&expected));
+
+        // Test generic priority. The `.*` pattern is wider than `abcd.*`, but
+        // `abcd.*` has priority, so it should match first.
+        let expected = DefaultConfigItem {
+            section: "alias".into(),
+            name: "abcd.*".into(),
+            default: Some(DefaultConfigItemType::Primitive(3.into())),
+            priority: Some(-1),
+            alias: vec![],
+            experimental: false,
+            documentation: "".into(),
+            in_core_extension: None,
+        };
+        assert_eq!(config.get(b"alias", b"abcdsomething"), Some(&expected));
+
+        // ...but if it doesn't match, we should fall back to `.*`
+        let expected = DefaultConfigItem {
+            section: "alias".into(),
+            name: ".*".into(),
+            default: Some(DefaultConfigItemType::Dynamic),
+            priority: Some(0),
+            alias: vec![],
+            experimental: false,
+            documentation: "".into(),
+            in_core_extension: None,
+        };
+        assert_eq!(config.get(b"alias", b"something"), Some(&expected));
+
+        let expected = DefaultConfigItem {
+            section: "chgserver".into(),
+            name: "idletimeout".into(),
+            default: Some(DefaultConfigItemType::Primitive(3600.into())),
+            priority: None,
+            alias: vec![],
+            experimental: false,
+            documentation: "".into(),
+            in_core_extension: None,
+        };
+        assert_eq!(config.get(b"chgserver", b"idletimeout"), Some(&expected));
+
+        let expected = DefaultConfigItem {
+            section: "cmdserver".into(),
+            name: "track-log".into(),
+            default: Some(DefaultConfigItemType::Lambda(vec![
+                "chgserver".into(),
+                "cmdserver".into(),
+                "repocache".into(),
+            ])),
+            priority: None,
+            alias: vec![],
+            experimental: false,
+            documentation: "".into(),
+            in_core_extension: None,
+        };
+        assert_eq!(config.get(b"cmdserver", b"track-log"), Some(&expected));
+
+        let expected = DefaultConfigItem {
+            section: "command-templates".into(),
+            name: "graphnode".into(),
+            default: None,
+            priority: None,
+            alias: vec![("ui".into(), "graphnodetemplate".into())],
+            experimental: false,
+            documentation:
+                "This is a docstring.\nThis is another line but this is not."
+                    .into(),
+            in_core_extension: None,
+        };
+        assert_eq!(
+            config.get(b"command-templates", b"graphnode"),
+            Some(&expected)
+        );
+    }
+}
--- a/rust/hg-core/src/config/layer.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/config/layer.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -107,7 +107,7 @@
     ) {
         self.sections
             .entry(section)
-            .or_insert_with(HashMap::new)
+            .or_default()
             .insert(item, ConfigValue { bytes: value, line });
     }
 
@@ -178,7 +178,7 @@
                     .expect("Path::parent fail on a file we’ve read");
                 // `Path::join` with an absolute argument correctly ignores the
                 // base path
-                let filename = dir.join(&get_path_from_bytes(&filename_bytes));
+                let filename = dir.join(get_path_from_bytes(&filename_bytes));
                 match std::fs::read(&filename) {
                     Ok(data) => {
                         layers.push(current_layer);
@@ -304,8 +304,9 @@
     CommandLineColor,
     /// From environment variables like `$PAGER` or `$EDITOR`
     Environment(Vec<u8>),
-    /* TODO defaults (configitems.py)
-     * TODO extensions
+    /// From configitems.toml
+    Defaults,
+    /* TODO extensions
      * TODO Python resources?
      * Others? */
 }
@@ -323,6 +324,9 @@
             ConfigOrigin::Tweakdefaults => {
                 write_bytes!(out, b"ui.tweakdefaults")
             }
+            ConfigOrigin::Defaults => {
+                write_bytes!(out, b"configitems.toml")
+            }
         }
     }
 }
--- a/rust/hg-core/src/config/mod.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/config/mod.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -9,14 +9,19 @@
 
 //! Mercurial config parsing and interfaces.
 
+pub mod config_items;
 mod layer;
 mod plain_info;
 mod values;
 pub use layer::{ConfigError, ConfigOrigin, ConfigParseError};
+use lazy_static::lazy_static;
 pub use plain_info::PlainInfo;
 
+use self::config_items::DefaultConfig;
+use self::config_items::DefaultConfigItem;
 use self::layer::ConfigLayer;
 use self::layer::ConfigValue;
+use crate::errors::HgError;
 use crate::errors::{HgResultExt, IoResultExt};
 use crate::utils::files::get_bytes_from_os_str;
 use format_bytes::{write_bytes, DisplayBytes};
@@ -26,6 +31,14 @@
 use std::path::{Path, PathBuf};
 use std::str;
 
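+// Parse `mercurial/configitems.toml` once, lazily, on first access;
+// `include_str!` embeds the file's contents into the binary at compile time.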
+lazy_static! {
+    static ref DEFAULT_CONFIG: Result<DefaultConfig, HgError> = {
+        DefaultConfig::from_contents(include_str!(
+            "../../../../mercurial/configitems.toml"
+        ))
+    };
+}
+
 /// Holds the config values for the current repository
 /// TODO update this docstring once we support more sources
 #[derive(Clone)]
@@ -347,13 +360,50 @@
         self.plain = plain;
     }
 
+    /// Returns the default value for the given config item, if any.
+    pub fn get_default(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<&DefaultConfigItem>, HgError> {
+        let default_config = DEFAULT_CONFIG.as_ref().map_err(|e| {
+            HgError::abort(
+                e.to_string(),
+                crate::exit_codes::ABORT,
+                Some("`mercurial/configitems.toml` is not valid".into()),
+            )
+        })?;
+        let default_opt = default_config.get(section, item);
+        Ok(default_opt.filter(|default| {
+            default
+                .in_core_extension()
+                .map(|extension| {
+                    // Only return the default for an in-core extension item
+                    // if said extension is enabled
+                    self.is_extension_enabled(extension.as_bytes())
+                })
+                .unwrap_or(true)
+        }))
+    }
+
+    /// Returns the parsed value of the config item for `section` + `item`,
+    /// using `parse` to convert the raw bytes into the expected type
+    /// (`expected_type` is passed as a string only to make debugging
+    /// easier). Used by higher-level methods like `get_bool`.
+    ///
+    /// `fallback_to_default` controls whether the default value (if any) is
+    /// returned if nothing is found.
     fn get_parse<'config, T: 'config>(
         &'config self,
         section: &[u8],
         item: &[u8],
         expected_type: &'static str,
         parse: impl Fn(&'config [u8]) -> Option<T>,
-    ) -> Result<Option<T>, ConfigValueParseError> {
+        fallback_to_default: bool,
+    ) -> Result<Option<T>, HgError>
+    where
+        Option<T>: TryFrom<&'config DefaultConfigItem, Error = HgError>,
+    {
         match self.get_inner(section, item) {
             Some((layer, v)) => match parse(&v.bytes) {
                 Some(b) => Ok(Some(b)),
@@ -364,22 +414,105 @@
                     section: section.to_owned(),
                     item: item.to_owned(),
                     expected_type,
-                })),
+                })
+                .into()),
             },
-            None => Ok(None),
+            None => {
+                if !fallback_to_default {
+                    return Ok(None);
+                }
+                match self.get_default(section, item)? {
+                    Some(default) => {
+                        // Defaults are TOML values, so they're not in the same
+                        // shape as in the config files.
+                        // First try to convert directly to the expected type
+                        let as_t = default.try_into();
+                        match as_t {
+                            Ok(t) => Ok(t),
+                            Err(e) => {
+                                // If it fails, it means that...
+                                let as_bytes: Result<Option<&[u8]>, _> =
+                                    default.try_into();
+                                match as_bytes {
+                                    Ok(bytes_opt) => {
+                                        if let Some(bytes) = bytes_opt {
+                                            // ...we should be able to parse it
+                                            return Ok(parse(bytes));
+                                        }
+                                        Err(e)
+                                    }
+                                    Err(_) => Err(e),
+                                }
+                            }
+                        }
+                    }
+                    None => {
+                        self.print_devel_warning(section, item)?;
+                        Ok(None)
+                    }
+                }
+            }
         }
     }
 
+    fn print_devel_warning(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<(), HgError> {
+        let warn_all = self.get_bool(b"devel", b"all-warnings")?;
+        let warn_specific = self.get_bool(b"devel", b"warn-config-unknown")?;
+        if warn_all || warn_specific {
+            // We technically shouldn't print anything here since it's not
+            // the concern of `hg-core`.
+            //
+            // We're printing directly to stderr since development warnings
+            // are not on by default and surfacing this to consumer crates
+            // (like `rhg`) would be more difficult, probably requiring
+            // something à la `log` crate.
+            //
+            // TODO maybe figure out a way of exposing a "warnings" channel
+            // that consumer crates can hook into. It would be useful for
+            // all other warnings that `hg-core` could expose.
+            eprintln!(
+                "devel-warn: accessing unregistered config item: '{}.{}'",
+                String::from_utf8_lossy(section),
+                String::from_utf8_lossy(item),
+            );
+        }
+        Ok(())
+    }
+
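+    // Sketch of the fallback behavior, assuming the shipped defaults include
+    // `censor.policy = "abort"`: with an empty user config,
+    // `get_str(b"censor", b"policy")` yields `Ok(Some("abort"))`, while
+    // `get_str_no_default` yields `Ok(None)`.
+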
     /// Returns an `Err` if the first value found is not a valid UTF-8 string.
     /// Otherwise, returns an `Ok(value)` if found, or `None`.
     pub fn get_str(
         &self,
         section: &[u8],
         item: &[u8],
-    ) -> Result<Option<&str>, ConfigValueParseError> {
-        self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
-            str::from_utf8(value).ok()
-        })
+    ) -> Result<Option<&str>, HgError> {
+        self.get_parse(
+            section,
+            item,
+            "ASCII or UTF-8 string",
+            |value| str::from_utf8(value).ok(),
+            true,
+        )
+    }
+
+    /// Same as `get_str`, but doesn't fall back to the default `configitem`
+    /// if not defined in the user config.
+    pub fn get_str_no_default(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<&str>, HgError> {
+        self.get_parse(
+            section,
+            item,
+            "ASCII or UTF-8 string",
+            |value| str::from_utf8(value).ok(),
+            false,
+        )
     }
 
     /// Returns an `Err` if the first value found is not a valid unsigned
@@ -388,10 +521,14 @@
         &self,
         section: &[u8],
         item: &[u8],
-    ) -> Result<Option<u32>, ConfigValueParseError> {
-        self.get_parse(section, item, "valid integer", |value| {
-            str::from_utf8(value).ok()?.parse().ok()
-        })
+    ) -> Result<Option<u32>, HgError> {
+        self.get_parse(
+            section,
+            item,
+            "valid integer",
+            |value| str::from_utf8(value).ok()?.parse().ok(),
+            true,
+        )
     }
 
     /// Returns an `Err` if the first value found is not a valid file size
@@ -401,8 +538,14 @@
         &self,
         section: &[u8],
         item: &[u8],
-    ) -> Result<Option<u64>, ConfigValueParseError> {
-        self.get_parse(section, item, "byte quantity", values::parse_byte_size)
+    ) -> Result<Option<u64>, HgError> {
+        self.get_parse(
+            section,
+            item,
+            "byte quantity",
+            values::parse_byte_size,
+            true,
+        )
     }
 
     /// Returns an `Err` if the first value found is not a valid boolean.
@@ -412,8 +555,18 @@
         &self,
         section: &[u8],
         item: &[u8],
-    ) -> Result<Option<bool>, ConfigValueParseError> {
-        self.get_parse(section, item, "boolean", values::parse_bool)
+    ) -> Result<Option<bool>, HgError> {
+        self.get_parse(section, item, "boolean", values::parse_bool, true)
+    }
+
+    /// Same as `get_option`, but doesn't fall back to the default `configitem`
+    /// if not defined in the user config.
+    pub fn get_option_no_default(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<Option<bool>, HgError> {
+        self.get_parse(section, item, "boolean", values::parse_bool, false)
     }
 
     /// Returns the corresponding boolean in the config. Returns `Ok(false)`
@@ -422,10 +575,20 @@
         &self,
         section: &[u8],
         item: &[u8],
-    ) -> Result<bool, ConfigValueParseError> {
+    ) -> Result<bool, HgError> {
         Ok(self.get_option(section, item)?.unwrap_or(false))
     }
 
+    /// Same as `get_bool`, but doesn't fall back to the default `configitem`
+    /// if not defined in the user config.
+    pub fn get_bool_no_default(
+        &self,
+        section: &[u8],
+        item: &[u8],
+    ) -> Result<bool, HgError> {
+        Ok(self.get_option_no_default(section, item)?.unwrap_or(false))
+    }
+
     /// Returns `true` if the extension is enabled, `false` otherwise
     pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
         let value = self.get(b"extensions", extension);
@@ -595,7 +758,7 @@
         let tmpdir = tempfile::tempdir().unwrap();
         let tmpdir_path = tmpdir.path();
         let mut included_file =
-            File::create(&tmpdir_path.join("included.rc")).unwrap();
+            File::create(tmpdir_path.join("included.rc")).unwrap();
 
         included_file.write_all(b"[section]\nitem=value1").unwrap();
         let base_config_path = tmpdir_path.join("base.rc");
@@ -633,4 +796,15 @@
         assert!(config.get_u32(b"section2", b"not-count").is_err());
         assert!(config.get_byte_size(b"section2", b"not-size").is_err());
     }
+
+    #[test]
+    fn test_default_parse() {
+        let config = Config::load_from_explicit_sources(vec![])
+            .expect("expected valid config");
+        let ret = config.get_byte_size(b"cmdserver", b"max-log-size");
+        assert!(ret.is_ok(), "{:?}", ret);
+
+        let ret = config.get_byte_size(b"ui", b"formatted");
+        assert!(ret.unwrap().is_none());
+    }
 }
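
The new trailing boolean threaded through `get_parse` selects whether a lookup may fall back to the registered default when the user config has no value, which is exactly what the `*_no_default` variants turn off. A minimal standalone sketch of that fallback logic, with hypothetical `Config` fields and a plain `String` error standing in for hg-core's types:

```
use std::collections::HashMap;

// Hypothetical stand-ins for hg-core's Config and its error types.
struct Config {
    user: HashMap<(String, String), String>,
    defaults: HashMap<(String, String), String>,
}

impl Config {
    fn get_parse<T>(
        &self,
        section: &str,
        item: &str,
        parse: impl Fn(&str) -> Option<T>,
        fall_back_to_default: bool,
    ) -> Result<Option<T>, String> {
        let key = (section.to_owned(), item.to_owned());
        // Only consult the registered default when asked to.
        let raw = self.user.get(&key).or_else(|| {
            if fall_back_to_default {
                self.defaults.get(&key)
            } else {
                None
            }
        });
        match raw {
            None => Ok(None),
            Some(v) => parse(v)
                .map(Some)
                .ok_or_else(|| format!("config error: {section}.{item}={v}")),
        }
    }

    fn get_u32(&self, s: &str, i: &str) -> Result<Option<u32>, String> {
        self.get_parse(s, i, |v| v.parse().ok(), true)
    }

    fn get_u32_no_default(&self, s: &str, i: &str) -> Result<Option<u32>, String> {
        self.get_parse(s, i, |v| v.parse().ok(), false)
    }
}

fn main() {
    let mut defaults = HashMap::new();
    defaults.insert(("ui".into(), "timeout".into()), "600".into());
    let config = Config { user: HashMap::new(), defaults };

    // The default-aware getter finds the registered value...
    assert_eq!(config.get_u32("ui", "timeout").unwrap(), Some(600));
    // ...while the `_no_default` variant reports "not set".
    assert_eq!(config.get_u32_no_default("ui", "timeout").unwrap(), None);
}
```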
--- a/rust/hg-core/src/copy_tracing/tests.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/copy_tracing/tests.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -1,5 +1,12 @@
 use super::*;
 
+/// Shorthand to reduce boilerplate when creating [`Revision`] for testing
+macro_rules! R {
+    ($revision:literal) => {
+        Revision($revision)
+    };
+}
+
 /// Unit tests for:
 ///
 /// ```ignore
@@ -27,7 +34,12 @@
     use MergePick::*;
 
     assert_eq!(
-        compare_value!(1, Normal, (1, None, { 1 }), (1, None, { 1 })),
+        compare_value!(
+            R!(1),
+            Normal,
+            (R!(1), None, { R!(1) }),
+            (R!(1), None, { R!(1) })
+        ),
         (Any, false)
     );
 }
@@ -70,12 +82,12 @@
 
     assert_eq!(
         merge_copies_dict!(
-            1,
-            {"foo" => (1, None, {})},
+            R!(1),
+            {"foo" => (R!(1), None, {})},
             {},
             {"foo" => Merged}
         ),
-        internal_path_copies!("foo" => (1, None, {}))
+        internal_path_copies!("foo" => (R!(1), None, {}))
     );
 }
 
@@ -124,17 +136,29 @@
 
     assert_eq!(
         combine_changeset_copies!(
-            { 1 => 1, 2 => 1 },
+            { R!(1) => 1, R!(2) => 1 },
             [
-                { rev: 1, p1: NULL, p2: NULL, actions: [], merge_cases: {}, },
-                { rev: 2, p1: NULL, p2: NULL, actions: [], merge_cases: {}, },
+                {
+                    rev: R!(1),
+                    p1: NULL,
+                    p2: NULL,
+                    actions: [],
+                    merge_cases: {},
+                },
                 {
-                    rev: 3, p1: 1, p2: 2,
+                    rev: R!(2),
+                    p1: NULL,
+                    p2: NULL,
+                    actions: [],
+                    merge_cases: {},
+                },
+                {
+                    rev: R!(3), p1: R!(1), p2: R!(2),
                     actions: [CopiedFromP1("destination.txt", "source.txt")],
                     merge_cases: {"destination.txt" => Merged},
                 },
             ],
-            3,
+            R!(3),
         ),
         path_copies!("destination.txt" => "source.txt")
     );
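
The `R!` macro exists because `Revision` became a newtype over `BaseRevision` in this release, so integer literals no longer coerce into revisions. A minimal mirror of the newtype-plus-macro pattern (names copied for illustration; hg-core defines the real types in its revlog module):

```
// Stand-in for hg-core's underlying integer revision type.
type BaseRevision = i32;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Revision(BaseRevision);

/// Shorthand to cut the `Revision(...)` boilerplate in tests.
macro_rules! R {
    ($revision:literal) => {
        Revision($revision)
    };
}

fn main() {
    let head = R!(13);
    // A bare integer no longer compiles where a Revision is expected:
    // let bad: Revision = 13; // error[E0308]: mismatched types
    assert_eq!(head, Revision(13));
}
```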
--- a/rust/hg-core/src/dagops.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/dagops.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -171,14 +171,15 @@
 mod tests {
 
     use super::*;
-    use crate::testing::SampleGraph;
+    use crate::{testing::SampleGraph, BaseRevision};
 
     /// Apply `retain_heads()` to the given slice and return as a sorted `Vec`
     fn retain_heads_sorted(
         graph: &impl Graph,
-        revs: &[Revision],
+        revs: &[BaseRevision],
     ) -> Result<Vec<Revision>, GraphError> {
-        let mut revs: HashSet<Revision> = revs.iter().cloned().collect();
+        let mut revs: HashSet<Revision> =
+            revs.iter().cloned().map(Revision).collect();
         retain_heads(graph, &mut revs)?;
         let mut as_vec: Vec<Revision> = revs.iter().cloned().collect();
         as_vec.sort_unstable();
@@ -202,9 +203,10 @@
     /// Apply `heads()` to the given slice and return as a sorted `Vec`
     fn heads_sorted(
         graph: &impl Graph,
-        revs: &[Revision],
+        revs: &[BaseRevision],
     ) -> Result<Vec<Revision>, GraphError> {
-        let heads = heads(graph, revs.iter())?;
+        let iter_revs: Vec<_> = revs.iter().cloned().map(Revision).collect();
+        let heads = heads(graph, iter_revs.iter())?;
         let mut as_vec: Vec<Revision> = heads.iter().cloned().collect();
         as_vec.sort_unstable();
         Ok(as_vec)
@@ -227,9 +229,9 @@
     /// Apply `roots()` and sort the result for easier comparison
     fn roots_sorted(
         graph: &impl Graph,
-        revs: &[Revision],
+        revs: &[BaseRevision],
     ) -> Result<Vec<Revision>, GraphError> {
-        let set: HashSet<_> = revs.iter().cloned().collect();
+        let set: HashSet<_> = revs.iter().cloned().map(Revision).collect();
         let mut as_vec = roots(graph, &set)?;
         as_vec.sort_unstable();
         Ok(as_vec)
@@ -252,17 +254,24 @@
     /// Apply `range()` and convert the result into a Vec for easier comparison
     fn range_vec(
         graph: impl Graph + Clone,
-        roots: &[Revision],
-        heads: &[Revision],
+        roots: &[BaseRevision],
+        heads: &[BaseRevision],
     ) -> Result<Vec<Revision>, GraphError> {
-        range(&graph, roots.iter().cloned(), heads.iter().cloned())
-            .map(|bs| bs.into_iter().collect())
+        range(
+            &graph,
+            roots.iter().cloned().map(Revision),
+            heads.iter().cloned().map(Revision),
+        )
+        .map(|bs| bs.into_iter().collect())
     }
 
     #[test]
     fn test_range() -> Result<(), GraphError> {
         assert_eq!(range_vec(SampleGraph, &[0], &[4])?, vec![0, 1, 2, 4]);
-        assert_eq!(range_vec(SampleGraph, &[0], &[8])?, vec![]);
+        assert_eq!(
+            range_vec(SampleGraph, &[0], &[8])?,
+            Vec::<Revision>::new()
+        );
         assert_eq!(
             range_vec(SampleGraph, &[5, 6], &[10, 11, 13])?,
             vec![5, 10]
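
The `Vec::<Revision>::new()` spelling is needed for the same reason: an empty `vec![]` compared against a `Vec<Revision>` no longer has an integer-literal fallback to resolve its element type. A compact illustration with a stand-in newtype:

```
#[derive(Debug, PartialEq)]
struct Revision(i32);

fn heads() -> Vec<Revision> {
    Vec::new()
}

fn main() {
    // With a plain integer alias, `assert_eq!(heads(), vec![])` compiled:
    // the empty vec's element type fell back to i32. A newtype has no
    // such fallback, so the element type must be spelled out:
    assert_eq!(heads(), Vec::<Revision>::new());
}
```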
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -62,7 +62,7 @@
     /// Initializes the multiset from a manifest.
     pub fn from_manifest(
         manifest: &[impl AsRef<HgPath>],
-    ) -> Result<Self, DirstateMapError> {
+    ) -> Result<Self, HgPathError> {
         let mut multiset = DirsMultiset {
             inner: FastHashMap::default(),
         };
@@ -80,19 +80,17 @@
     pub fn add_path(
         &mut self,
         path: impl AsRef<HgPath>,
-    ) -> Result<(), DirstateMapError> {
+    ) -> Result<(), HgPathError> {
         for subpath in files::find_dirs(path.as_ref()) {
             if subpath.as_bytes().last() == Some(&b'/') {
                 // TODO Remove this once PathAuditor is certified
                 // as the only entrypoint for path data
                 let second_slash_index = subpath.len() - 1;
 
-                return Err(DirstateMapError::InvalidPath(
-                    HgPathError::ConsecutiveSlashes {
-                        bytes: path.as_ref().as_bytes().to_owned(),
-                        second_slash_index,
-                    },
-                ));
+                return Err(HgPathError::ConsecutiveSlashes {
+                    bytes: path.as_ref().as_bytes().to_owned(),
+                    second_slash_index,
+                });
             }
             if let Some(val) = self.inner.get_mut(subpath) {
                 *val += 1;
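
`add_path` now returns `HgPathError` directly instead of wrapping it in `DirstateMapError::InvalidPath`. The check itself is unchanged: any ancestor produced by `find_dirs` that still ends in `/` betrays consecutive slashes in the input. A self-contained sketch with a hypothetical `find_dirs` helper over byte slices (hg-core's real one works on `HgPath`):

```
#[derive(Debug, PartialEq)]
enum HgPathError {
    ConsecutiveSlashes { bytes: Vec<u8>, second_slash_index: usize },
}

/// Hypothetical helper: ancestors of `path`, longest first
/// ("a/b/c" yields "a/b", then "a").
fn find_dirs<'a>(path: &'a [u8]) -> impl Iterator<Item = &'a [u8]> + 'a {
    let mut rest = path;
    std::iter::from_fn(move || {
        let idx = rest.iter().rposition(|&b| b == b'/')?;
        rest = &rest[..idx];
        Some(rest)
    })
}

fn add_path(path: &[u8]) -> Result<(), HgPathError> {
    // An ancestor ending in '/' means the original path contained "//".
    for subpath in find_dirs(path) {
        if subpath.last() == Some(&b'/') {
            return Err(HgPathError::ConsecutiveSlashes {
                bytes: path.to_owned(),
                second_slash_index: subpath.len() - 1,
            });
        }
    }
    Ok(())
}

fn main() {
    assert!(add_path(b"a/b/c").is_ok());
    assert!(matches!(
        add_path(b"a//b"),
        Err(HgPathError::ConsecutiveSlashes { .. })
    ));
}
```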
--- a/rust/hg-core/src/dirstate/status.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/dirstate/status.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -20,7 +20,7 @@
 
 /// Wrong type of file from a `BadMatch`
 /// Note: a lot of those don't exist on all platforms.
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub enum BadType {
     CharacterDevice,
     BlockDevice,
--- a/rust/hg-core/src/dirstate_tree/dirstate_map.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -178,7 +178,7 @@
             ChildNodes::InMemory(nodes) => Ok(nodes),
             ChildNodes::OnDisk(nodes) => {
                 *unreachable_bytes +=
-                    std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
+                    std::mem::size_of_val::<[on_disk::Node]>(*nodes) as u32;
                 let nodes = nodes
                     .iter()
                     .map(|node| {
@@ -579,6 +579,14 @@
         }
     }
 
+    pub fn has_node(
+        &self,
+        path: &HgPath,
+    ) -> Result<bool, DirstateV2ParseError> {
+        let node = self.get_node(path)?;
+        Ok(node.is_some())
+    }
+
     /// Returns a mutable reference to the node at `path` if it exists
     ///
     /// `each_ancestor` is a callback that is called for each ancestor node
@@ -756,7 +764,7 @@
     ) -> Result<bool, DirstateV2ParseError> {
         let was_tracked = old_entry_opt.map_or(false, |e| e.tracked());
         let had_entry = old_entry_opt.is_some();
-        let tracked_count_increment = if was_tracked { 0 } else { 1 };
+        let tracked_count_increment = u32::from(!was_tracked);
         let mut new = false;
 
         let node = self.get_or_insert_node(filename, |ancestor| {
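
Two small changes above are easy to misread: `size_of_val` now receives the dereferenced slice so it measures the `[Node]` data rather than a reference to it, and the tracked-count increment uses the `u32::from(bool)` idiom. A quick standalone check of both, with a toy element type:

```
fn main() {
    // `size_of_val` on the slice itself counts the elements' bytes:
    let nodes: &[u64] = &[1, 2, 3];
    assert_eq!(std::mem::size_of_val(nodes), 3 * std::mem::size_of::<u64>());

    // ...and `u32::from(bool)` is the branch-free 0/1 increment:
    let was_tracked = false;
    assert_eq!(u32::from(!was_tracked), 1);
}
```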
--- a/rust/hg-core/src/dirstate_tree/on_disk.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/on_disk.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -414,6 +414,8 @@
     }
 
     fn synthesize_unix_mode(&self) -> u32 {
+        // Not all platforms' libc uses the same type here (macOS uses i32)
+        #[allow(clippy::unnecessary_cast)]
         let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
             libc::S_IFLNK as u32
         } else {
@@ -529,6 +531,8 @@
         flags.set(Flags::WDIR_TRACKED, wc_tracked);
         flags.set(Flags::P1_TRACKED, p1_tracked);
         flags.set(Flags::P2_INFO, p2_info);
+        // Not all platforms' libc uses the same type here (macOS uses i32)
+        #[allow(clippy::unnecessary_cast)]
         let size = if let Some((m, s)) = mode_size_opt {
             let exec_perm = m & (libc::S_IXUSR as u32) != 0;
             let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
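
The `#[allow(clippy::unnecessary_cast)]` is needed because the `libc` file-type constants do not have one portable type: the cast is a no-op on Linux (where `mode_t` is `u32`) but required on platforms such as macOS. A compilable sketch of the same shape, assuming the `libc` crate as hg-core does:

```
fn synthesize_unix_mode(is_symlink: bool, perms: u32) -> u32 {
    // Redundant on Linux, required where the constants have another
    // type; hence the allow instead of removing the cast.
    #[allow(clippy::unnecessary_cast)]
    let file_type = if is_symlink {
        libc::S_IFLNK as u32
    } else {
        libc::S_IFREG as u32
    };
    file_type | perms
}

fn main() {
    assert_eq!(synthesize_unix_mode(false, 0o644) & 0o777, 0o644);
}
```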
--- a/rust/hg-core/src/dirstate_tree/status.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/dirstate_tree/status.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -8,12 +8,14 @@
 use crate::dirstate_tree::dirstate_map::NodeRef;
 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
 use crate::matchers::get_ignore_function;
-use crate::matchers::Matcher;
+use crate::matchers::{Matcher, VisitChildrenSet};
 use crate::utils::files::get_bytes_from_os_string;
 use crate::utils::files::get_bytes_from_path;
 use crate::utils::files::get_path_from_bytes;
+use crate::utils::hg_path::hg_path_to_path_buf;
 use crate::utils::hg_path::HgPath;
 use crate::BadMatch;
+use crate::BadType;
 use crate::DirstateStatus;
 use crate::HgPathCow;
 use crate::PatternFileWarning;
@@ -24,6 +26,7 @@
 use sha1::{Digest, Sha1};
 use std::borrow::Cow;
 use std::io;
+use std::os::unix::prelude::FileTypeExt;
 use std::path::Path;
 use std::path::PathBuf;
 use std::sync::Mutex;
@@ -155,6 +158,18 @@
         root_cached_mtime,
         is_at_repo_root,
     )?;
+    if let Some(file_set) = common.matcher.file_set() {
+        for file in file_set {
+            if !file.is_empty() && !dmap.has_node(file)? {
+                let path = hg_path_to_path_buf(file)?;
+                if let io::Result::Err(error) =
+                    root_dir.join(path).symlink_metadata()
+                {
+                    common.io_error(error, file)
+                }
+            }
+        }
+    }
     let mut outcome = common.outcome.into_inner().unwrap();
     let new_cacheable = common.new_cacheable_directories.into_inner().unwrap();
     let outdated = common.outdated_cached_directories.into_inner().unwrap();
@@ -234,7 +249,7 @@
         }
     }
 
-    fn force<'b>(&self, ignore_fn: &IgnoreFnType<'b>) -> bool {
+    fn force(&self, ignore_fn: &IgnoreFnType<'_>) -> bool {
         match self.parent {
             None => false,
             Some(parent) => {
@@ -367,6 +382,16 @@
         false
     }
 
+    fn should_visit(set: &VisitChildrenSet, basename: &HgPath) -> bool {
+        match set {
+            VisitChildrenSet::This | VisitChildrenSet::Recursive => true,
+            VisitChildrenSet::Empty => false,
+            VisitChildrenSet::Set(children_to_visit) => {
+                children_to_visit.contains(basename)
+            }
+        }
+    }
+
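
`should_visit` is the pruning primitive: the status walk consults the matcher's `visit_children_set` once per directory and skips children it rules out. A standalone sketch with local stand-ins for `VisitChildrenSet` and `HgPath`:

```
use std::collections::HashSet;

enum VisitChildrenSet {
    This,
    Recursive,
    Empty,
    Set(HashSet<Vec<u8>>),
}

fn should_visit(set: &VisitChildrenSet, basename: &[u8]) -> bool {
    match set {
        VisitChildrenSet::This | VisitChildrenSet::Recursive => true,
        VisitChildrenSet::Empty => false,
        VisitChildrenSet::Set(children) => children.contains(basename),
    }
}

fn main() {
    let set = VisitChildrenSet::Set(HashSet::from([b"src".to_vec()]));
    assert!(should_visit(&set, b"src"));
    // Everything outside the set is pruned without touching the fs:
    assert!(!should_visit(&set, b"target"));
    assert!(should_visit(&VisitChildrenSet::This, b"anything"));
    assert!(should_visit(&VisitChildrenSet::Recursive, b"anything"));
    assert!(!should_visit(&VisitChildrenSet::Empty, b"anything"));
}
```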
     /// Returns whether all child entries of the filesystem directory have a
     /// corresponding dirstate node or are ignored.
     fn traverse_fs_directory_and_dirstate<'ancestor>(
@@ -378,21 +403,27 @@
         cached_directory_mtime: Option<TruncatedTimestamp>,
         is_at_repo_root: bool,
     ) -> Result<bool, DirstateV2ParseError> {
+        let children_set = self.matcher.visit_children_set(directory_hg_path);
+        if let VisitChildrenSet::Empty = children_set {
+            return Ok(false);
+        }
         if self.can_skip_fs_readdir(directory_entry, cached_directory_mtime) {
             dirstate_nodes
                 .par_iter()
                 .map(|dirstate_node| {
                     let fs_path = &directory_entry.fs_path;
-                    let fs_path = fs_path.join(get_path_from_bytes(
-                        dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
-                    ));
+                    let basename =
+                        dirstate_node.base_name(self.dmap.on_disk)?.as_bytes();
+                    let fs_path = fs_path.join(get_path_from_bytes(basename));
+                    if !Self::should_visit(
+                        &children_set,
+                        HgPath::new(basename),
+                    ) {
+                        return Ok(());
+                    }
                     match std::fs::symlink_metadata(&fs_path) {
                         Ok(fs_metadata) => {
-                            let file_type =
-                                match fs_metadata.file_type().try_into() {
-                                    Ok(file_type) => file_type,
-                                    Err(_) => return Ok(()),
-                                };
+                            let file_type = fs_metadata.file_type().into();
                             let entry = DirEntry {
                                 hg_path: Cow::Borrowed(
                                     dirstate_node
@@ -472,6 +503,15 @@
         .par_bridge()
         .map(|pair| {
             use itertools::EitherOrBoth::*;
+            let basename = match &pair {
+                Left(dirstate_node) | Both(dirstate_node, _) => HgPath::new(
+                    dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
+                ),
+                Right(fs_entry) => &fs_entry.hg_path,
+            };
+            if !Self::should_visit(&children_set, basename) {
+                return Ok(false);
+            }
             let has_dirstate_node_or_is_ignored = match pair {
                 Both(dirstate_node, fs_entry) => {
                     self.traverse_fs_and_dirstate(
@@ -513,6 +553,15 @@
             // replaced by a directory or something else.
             self.mark_removed_or_deleted_if_file(&dirstate_node)?;
         }
+        if let Some(bad_type) = fs_entry.is_bad() {
+            if self.matcher.exact_match(hg_path) {
+                let path = dirstate_node.full_path(self.dmap.on_disk)?;
+                self.outcome.lock().unwrap().bad.push((
+                    path.to_owned().into(),
+                    BadMatch::BadType(bad_type),
+                ))
+            }
+        }
         if fs_entry.is_dir() {
             if self.options.collect_traversed_dirs {
                 self.outcome
@@ -866,21 +915,27 @@
     File,
     Directory,
     Symlink,
+    BadType(BadType),
 }
 
-impl TryFrom<std::fs::FileType> for FakeFileType {
-    type Error = ();
-
-    fn try_from(f: std::fs::FileType) -> Result<Self, Self::Error> {
+impl From<std::fs::FileType> for FakeFileType {
+    fn from(f: std::fs::FileType) -> Self {
         if f.is_dir() {
-            Ok(Self::Directory)
+            Self::Directory
         } else if f.is_file() {
-            Ok(Self::File)
+            Self::File
         } else if f.is_symlink() {
-            Ok(Self::Symlink)
+            Self::Symlink
+        } else if f.is_fifo() {
+            Self::BadType(BadType::FIFO)
+        } else if f.is_block_device() {
+            Self::BadType(BadType::BlockDevice)
+        } else if f.is_char_device() {
+            Self::BadType(BadType::CharacterDevice)
+        } else if f.is_socket() {
+            Self::BadType(BadType::Socket)
         } else {
-            // Things like FIFO etc.
-            Err(())
+            Self::BadType(BadType::Unknown)
         }
     }
 }
@@ -942,10 +997,7 @@
             };
             let filename =
                 Cow::Owned(get_bytes_from_os_string(file_name).into());
-            let file_type = match FakeFileType::try_from(file_type) {
-                Ok(file_type) => file_type,
-                Err(_) => continue,
-            };
+            let file_type = FakeFileType::from(file_type);
             results.push(DirEntry {
                 hg_path: filename,
                 fs_path: Cow::Owned(full_path.to_path_buf()),
@@ -974,6 +1026,13 @@
     fn is_symlink(&self) -> bool {
         self.file_type == FakeFileType::Symlink
     }
+
+    fn is_bad(&self) -> Option<BadType> {
+        match self.file_type {
+            FakeFileType::BadType(ty) => Some(ty),
+            _ => None,
+        }
+    }
 }
 
 /// Return the `mtime` of a temporary file newly-created in the `.hg` directory
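
Replacing the fallible `TryFrom` with a total `From` is what lets the walk report unsupported entry kinds (via `BadMatch::BadType`) instead of silently skipping them. The classification relies on the Unix-only `FileTypeExt` queries; a runnable standalone version with local stand-ins for the two enums, assuming a typical Unix system where `/dev/null` is a character device:

```
use std::fs;
use std::os::unix::fs::FileTypeExt;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BadType {
    CharacterDevice,
    BlockDevice,
    FIFO,
    Socket,
    Unknown,
}

#[derive(Debug, PartialEq, Eq)]
enum FakeFileType {
    File,
    Directory,
    Symlink,
    BadType(BadType),
}

impl From<fs::FileType> for FakeFileType {
    fn from(f: fs::FileType) -> Self {
        if f.is_dir() {
            Self::Directory
        } else if f.is_file() {
            Self::File
        } else if f.is_symlink() {
            Self::Symlink
        } else if f.is_fifo() {
            Self::BadType(BadType::FIFO)
        } else if f.is_block_device() {
            Self::BadType(BadType::BlockDevice)
        } else if f.is_char_device() {
            Self::BadType(BadType::CharacterDevice)
        } else if f.is_socket() {
            Self::BadType(BadType::Socket)
        } else {
            // The conversion is total, so callers no longer skip entries.
            Self::BadType(BadType::Unknown)
        }
    }
}

fn main() -> std::io::Result<()> {
    let ft = fs::symlink_metadata("/dev/null")?.file_type();
    assert_eq!(
        FakeFileType::from(ft),
        FakeFileType::BadType(BadType::CharacterDevice)
    );
    Ok(())
}
```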
--- a/rust/hg-core/src/discovery.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/discovery.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -332,7 +332,7 @@
             FastHashMap::default();
         for &rev in self.undecided.as_ref().unwrap() {
             for p in ParentsIterator::graph_parents(&self.graph, rev)? {
-                children.entry(p).or_insert_with(Vec::new).push(rev);
+                children.entry(p).or_default().push(rev);
             }
         }
         self.children_cache = Some(children);
@@ -481,6 +481,13 @@
     use super::*;
     use crate::testing::SampleGraph;
 
+    /// Shorthand to reduce boilerplate when creating [`Revision`] for testing
+    macro_rules! R {
+        ($revision:literal) => {
+            Revision($revision)
+        };
+    }
+
     /// A PartialDiscovery as for pushing all the heads of `SampleGraph`
     ///
     /// To avoid actual randomness in these tests, we give it a fixed
@@ -488,7 +495,7 @@
     fn full_disco() -> PartialDiscovery<SampleGraph> {
         PartialDiscovery::new_with_seed(
             SampleGraph,
-            vec![10, 11, 12, 13],
+            vec![R!(10), R!(11), R!(12), R!(13)],
             [0; 16],
             true,
             true,
@@ -501,7 +508,7 @@
     fn disco12() -> PartialDiscovery<SampleGraph> {
         PartialDiscovery::new_with_seed(
             SampleGraph,
-            vec![12],
+            vec![R!(12)],
             [0; 16],
             true,
             true,
@@ -540,7 +547,7 @@
         assert!(!disco.has_info());
         assert_eq!(disco.stats().undecided, None);
 
-        disco.add_common_revisions(vec![11, 12])?;
+        disco.add_common_revisions(vec![R!(11), R!(12)])?;
         assert!(disco.has_info());
         assert!(!disco.is_complete());
         assert!(disco.missing.is_empty());
@@ -559,14 +566,14 @@
     #[test]
     fn test_discovery() -> Result<(), GraphError> {
         let mut disco = full_disco();
-        disco.add_common_revisions(vec![11, 12])?;
-        disco.add_missing_revisions(vec![8, 10])?;
+        disco.add_common_revisions(vec![R!(11), R!(12)])?;
+        disco.add_missing_revisions(vec![R!(8), R!(10)])?;
         assert_eq!(sorted_undecided(&disco), vec![5]);
         assert_eq!(sorted_missing(&disco), vec![8, 10, 13]);
         assert!(!disco.is_complete());
 
-        disco.add_common_revisions(vec![5])?;
-        assert_eq!(sorted_undecided(&disco), vec![]);
+        disco.add_common_revisions(vec![R!(5)])?;
+        assert_eq!(sorted_undecided(&disco), Vec::<Revision>::new());
         assert_eq!(sorted_missing(&disco), vec![8, 10, 13]);
         assert!(disco.is_complete());
         assert_eq!(sorted_common_heads(&disco)?, vec![5, 11, 12]);
@@ -577,12 +584,12 @@
     fn test_add_missing_early_continue() -> Result<(), GraphError> {
         eprintln!("test_add_missing_early_stop");
         let mut disco = full_disco();
-        disco.add_common_revisions(vec![13, 3, 4])?;
+        disco.add_common_revisions(vec![R!(13), R!(3), R!(4)])?;
         disco.ensure_children_cache()?;
         // 12 is a grandchild of 6, through 9
         // passing them in this order maximizes the chances of the
         // early continue to do the wrong thing
-        disco.add_missing_revisions(vec![6, 9, 12])?;
+        disco.add_missing_revisions(vec![R!(6), R!(9), R!(12)])?;
         assert_eq!(sorted_undecided(&disco), vec![5, 7, 10, 11]);
         assert_eq!(sorted_missing(&disco), vec![6, 9, 12]);
         assert!(!disco.is_complete());
@@ -591,18 +598,24 @@
 
     #[test]
     fn test_limit_sample_no_need_to() {
-        let sample = vec![1, 2, 3, 4];
+        let sample = vec![R!(1), R!(2), R!(3), R!(4)];
         assert_eq!(full_disco().limit_sample(sample, 10), vec![1, 2, 3, 4]);
     }
 
     #[test]
     fn test_limit_sample_less_than_half() {
-        assert_eq!(full_disco().limit_sample((1..6).collect(), 2), vec![2, 5]);
+        assert_eq!(
+            full_disco().limit_sample((1..6).map(Revision).collect(), 2),
+            vec![2, 5]
+        );
     }
 
     #[test]
     fn test_limit_sample_more_than_half() {
-        assert_eq!(full_disco().limit_sample((1..4).collect(), 2), vec![1, 2]);
+        assert_eq!(
+            full_disco().limit_sample((1..4).map(Revision).collect(), 2),
+            vec![1, 2]
+        );
     }
 
     #[test]
@@ -610,7 +623,10 @@
         let mut disco = full_disco();
         disco.randomize = false;
         assert_eq!(
-            disco.limit_sample(vec![1, 8, 13, 5, 7, 3], 4),
+            disco.limit_sample(
+                vec![R!(1), R!(8), R!(13), R!(5), R!(7), R!(3)],
+                4
+            ),
             vec![1, 3, 5, 7]
         );
     }
@@ -618,7 +634,7 @@
     #[test]
     fn test_quick_sample_enough_undecided_heads() -> Result<(), GraphError> {
         let mut disco = full_disco();
-        disco.undecided = Some((1..=13).collect());
+        disco.undecided = Some((1..=13).map(Revision).collect());
 
         let mut sample_vec = disco.take_quick_sample(vec![], 4)?;
         sample_vec.sort_unstable();
@@ -631,7 +647,7 @@
         let mut disco = disco12();
         disco.ensure_undecided()?;
 
-        let mut sample_vec = disco.take_quick_sample(vec![12], 4)?;
+        let mut sample_vec = disco.take_quick_sample(vec![R!(12)], 4)?;
         sample_vec.sort_unstable();
         // r12's only parent is r9, whose unique grandparent through the
         // diamond shape is r4. This ends there because the distance from r4
@@ -646,16 +662,16 @@
         disco.ensure_children_cache()?;
 
         let cache = disco.children_cache.unwrap();
-        assert_eq!(cache.get(&2).cloned(), Some(vec![4]));
-        assert_eq!(cache.get(&10).cloned(), None);
+        assert_eq!(cache.get(&R!(2)).cloned(), Some(vec![R!(4)]));
+        assert_eq!(cache.get(&R!(10)).cloned(), None);
 
-        let mut children_4 = cache.get(&4).cloned().unwrap();
+        let mut children_4 = cache.get(&R!(4)).cloned().unwrap();
         children_4.sort_unstable();
-        assert_eq!(children_4, vec![5, 6, 7]);
+        assert_eq!(children_4, vec![R!(5), R!(6), R!(7)]);
 
-        let mut children_7 = cache.get(&7).cloned().unwrap();
+        let mut children_7 = cache.get(&R!(7)).cloned().unwrap();
         children_7.sort_unstable();
-        assert_eq!(children_7, vec![9, 11]);
+        assert_eq!(children_7, vec![R!(9), R!(11)]);
 
         Ok(())
     }
@@ -664,14 +680,14 @@
     fn test_complete_sample() {
         let mut disco = full_disco();
         let undecided: HashSet<Revision> =
-            [4, 7, 9, 2, 3].iter().cloned().collect();
+            [4, 7, 9, 2, 3].iter().cloned().map(Revision).collect();
         disco.undecided = Some(undecided);
 
-        let mut sample = vec![0];
+        let mut sample = vec![R!(0)];
         disco.random_complete_sample(&mut sample, 3);
         assert_eq!(sample.len(), 3);
 
-        let mut sample = vec![2, 4, 7];
+        let mut sample = vec![R!(2), R!(4), R!(7)];
         disco.random_complete_sample(&mut sample, 1);
         assert_eq!(sample.len(), 3);
     }
@@ -679,7 +695,7 @@
     #[test]
     fn test_bidirectional_sample() -> Result<(), GraphError> {
         let mut disco = full_disco();
-        disco.undecided = Some((0..=13).into_iter().collect());
+        disco.undecided = Some((0..=13).map(Revision).collect());
 
         let (sample_set, size) = disco.bidirectional_sample(7)?;
         assert_eq!(size, 7);
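
The `or_insert_with(Vec::new)` to `or_default()` change in `ensure_children_cache` is behavior-preserving. The same children-map construction over toy `(parent, child)` edges:

```
use std::collections::HashMap;

fn main() {
    let edges = [(0u32, 1u32), (0, 2), (1, 3)];
    let mut children: HashMap<u32, Vec<u32>> = HashMap::new();
    for (parent, child) in edges {
        // `or_default()` creates the empty Vec on first sight of `parent`.
        children.entry(parent).or_default().push(child);
    }
    assert_eq!(children[&0], vec![1, 2]);
    assert!(children.get(&3).is_none()); // leaves have no entry
}
```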
--- a/rust/hg-core/src/filepatterns.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/filepatterns.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -24,7 +24,7 @@
 lazy_static! {
     static ref RE_ESCAPE: Vec<Vec<u8>> = {
         let mut v: Vec<Vec<u8>> = (0..=255).map(|byte| vec![byte]).collect();
-        let to_escape = b"()[]{}?*+-|^$\\.&~# \t\n\r\x0b\x0c";
+        let to_escape = b"()[]{}?*+-|^$\\.&~#\t\n\r\x0b\x0c";
         for byte in to_escape {
             v[*byte as usize].insert(0, b'\\');
         }
@@ -36,9 +36,6 @@
 const GLOB_REPLACEMENTS: &[(&[u8], &[u8])] =
     &[(b"*/", b"(?:.*/)?"), (b"*", b".*"), (b"", b"[^/]*")];
 
-/// Appended to the regexp of globs
-const GLOB_SUFFIX: &[u8; 7] = b"(?:/|$)";
-
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum PatternSyntax {
     /// A regular expression
@@ -181,7 +178,7 @@
 /// Builds the regex that corresponds to the given pattern.
 /// If within a `syntax: regexp` context, returns the pattern,
 /// otherwise, returns the corresponding regex.
-fn _build_single_regex(entry: &IgnorePattern) -> Vec<u8> {
+fn _build_single_regex(entry: &IgnorePattern, glob_suffix: &[u8]) -> Vec<u8> {
     let IgnorePattern {
         syntax, pattern, ..
     } = entry;
@@ -245,13 +242,13 @@
         PatternSyntax::RelGlob => {
             let glob_re = glob_to_re(pattern);
             if let Some(rest) = glob_re.drop_prefix(b"[^/]*") {
-                [b".*", rest, GLOB_SUFFIX].concat()
+                [b".*", rest, glob_suffix].concat()
             } else {
-                [b"(?:.*/)?", glob_re.as_slice(), GLOB_SUFFIX].concat()
+                [b"(?:.*/)?", glob_re.as_slice(), glob_suffix].concat()
             }
         }
         PatternSyntax::Glob | PatternSyntax::RootGlob => {
-            [glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
+            [glob_to_re(pattern).as_slice(), glob_suffix].concat()
         }
         PatternSyntax::Include
         | PatternSyntax::SubInclude
@@ -309,6 +306,7 @@
 /// that don't need to be transformed into a regex.
 pub fn build_single_regex(
     entry: &IgnorePattern,
+    glob_suffix: &[u8],
 ) -> Result<Option<Vec<u8>>, PatternError> {
     let IgnorePattern {
         pattern, syntax, ..
@@ -317,6 +315,7 @@
         PatternSyntax::RootGlob
         | PatternSyntax::Path
         | PatternSyntax::RelGlob
+        | PatternSyntax::RelPath
         | PatternSyntax::RootFiles => normalize_path_bytes(pattern),
         PatternSyntax::Include | PatternSyntax::SubInclude => {
             return Err(PatternError::NonRegexPattern(entry.clone()))
@@ -330,22 +329,27 @@
     } else {
         let mut entry = entry.clone();
         entry.pattern = pattern;
-        Ok(Some(_build_single_regex(&entry)))
+        Ok(Some(_build_single_regex(&entry, glob_suffix)))
     }
 }
 
 lazy_static! {
-    static ref SYNTAXES: FastHashMap<&'static [u8], &'static [u8]> = {
+    static ref SYNTAXES: FastHashMap<&'static [u8], PatternSyntax> = {
         let mut m = FastHashMap::default();
 
-        m.insert(b"re".as_ref(), b"relre:".as_ref());
-        m.insert(b"regexp".as_ref(), b"relre:".as_ref());
-        m.insert(b"glob".as_ref(), b"relglob:".as_ref());
-        m.insert(b"rootglob".as_ref(), b"rootglob:".as_ref());
-        m.insert(b"include".as_ref(), b"include:".as_ref());
-        m.insert(b"subinclude".as_ref(), b"subinclude:".as_ref());
-        m.insert(b"path".as_ref(), b"path:".as_ref());
-        m.insert(b"rootfilesin".as_ref(), b"rootfilesin:".as_ref());
+        m.insert(b"re:".as_ref(), PatternSyntax::Regexp);
+        m.insert(b"regexp:".as_ref(), PatternSyntax::Regexp);
+        m.insert(b"path:".as_ref(), PatternSyntax::Path);
+        m.insert(b"filepath:".as_ref(), PatternSyntax::FilePath);
+        m.insert(b"relpath:".as_ref(), PatternSyntax::RelPath);
+        m.insert(b"rootfilesin:".as_ref(), PatternSyntax::RootFiles);
+        m.insert(b"relglob:".as_ref(), PatternSyntax::RelGlob);
+        m.insert(b"relre:".as_ref(), PatternSyntax::RelRegexp);
+        m.insert(b"glob:".as_ref(), PatternSyntax::Glob);
+        m.insert(b"rootglob:".as_ref(), PatternSyntax::RootGlob);
+        m.insert(b"include:".as_ref(), PatternSyntax::Include);
+        m.insert(b"subinclude:".as_ref(), PatternSyntax::SubInclude);
+
         m
     };
 }
@@ -358,11 +362,50 @@
     NoSuchFile(PathBuf),
 }
 
+pub fn parse_one_pattern(
+    pattern: &[u8],
+    source: &Path,
+    default: PatternSyntax,
+    normalize: bool,
+) -> IgnorePattern {
+    let mut pattern_bytes: &[u8] = pattern;
+    let mut syntax = default;
+
+    for (s, val) in SYNTAXES.iter() {
+        if let Some(rest) = pattern_bytes.drop_prefix(s) {
+            syntax = val.clone();
+            pattern_bytes = rest;
+            break;
+        }
+    }
+
+    let pattern = match syntax {
+        PatternSyntax::RootGlob
+        | PatternSyntax::Path
+        | PatternSyntax::Glob
+        | PatternSyntax::RelGlob
+        | PatternSyntax::RelPath
+        | PatternSyntax::RootFiles
+            if normalize =>
+        {
+            normalize_path_bytes(pattern_bytes)
+        }
+        _ => pattern_bytes.to_vec(),
+    };
+
+    IgnorePattern {
+        syntax,
+        pattern,
+        source: source.to_owned(),
+    }
+}
+
 pub fn parse_pattern_file_contents(
     lines: &[u8],
     file_path: &Path,
-    default_syntax_override: Option<&[u8]>,
+    default_syntax_override: Option<PatternSyntax>,
     warn: bool,
+    relativize: bool,
 ) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
     let comment_regex = Regex::new(r"((?:^|[^\\])(?:\\\\)*)#.*").unwrap();
 
@@ -372,11 +415,9 @@
     let mut warnings: Vec<PatternFileWarning> = vec![];
 
     let mut current_syntax =
-        default_syntax_override.unwrap_or_else(|| b"relre:".as_ref());
+        default_syntax_override.unwrap_or(PatternSyntax::RelRegexp);
 
-    for (line_number, mut line) in lines.split(|c| *c == b'\n').enumerate() {
-        let line_number = line_number + 1;
-
+    for mut line in lines.split(|c| *c == b'\n') {
         let line_buf;
         if line.contains(&b'#') {
             if let Some(cap) = comment_regex.captures(line) {
@@ -386,7 +427,7 @@
             line = &line_buf;
         }
 
-        let mut line = line.trim_end();
+        let line = line.trim_end();
 
         if line.is_empty() {
             continue;
@@ -395,50 +436,62 @@
         if let Some(syntax) = line.drop_prefix(b"syntax:") {
             let syntax = syntax.trim();
 
-            if let Some(rel_syntax) = SYNTAXES.get(syntax) {
-                current_syntax = rel_syntax;
+            if let Some(parsed) =
+                SYNTAXES.get([syntax, &b":"[..]].concat().as_slice())
+            {
+                current_syntax = parsed.clone();
             } else if warn {
                 warnings.push(PatternFileWarning::InvalidSyntax(
                     file_path.to_owned(),
                     syntax.to_owned(),
                 ));
             }
-            continue;
+        } else {
+            let pattern = parse_one_pattern(
+                line,
+                file_path,
+                current_syntax.clone(),
+                false,
+            );
+            inputs.push(if relativize {
+                pattern.to_relative()
+            } else {
+                pattern
+            })
         }
-
-        let mut line_syntax: &[u8] = current_syntax;
-
-        for (s, rels) in SYNTAXES.iter() {
-            if let Some(rest) = line.drop_prefix(rels) {
-                line_syntax = rels;
-                line = rest;
-                break;
-            }
-            if let Some(rest) = line.drop_prefix(&[s, &b":"[..]].concat()) {
-                line_syntax = rels;
-                line = rest;
-                break;
-            }
-        }
-
-        inputs.push(IgnorePattern::new(
-            parse_pattern_syntax(line_syntax).map_err(|e| match e {
-                PatternError::UnsupportedSyntax(syntax) => {
-                    PatternError::UnsupportedSyntaxInFile(
-                        syntax,
-                        file_path.to_string_lossy().into(),
-                        line_number,
-                    )
-                }
-                _ => e,
-            })?,
-            line,
-            file_path,
-        ));
     }
     Ok((inputs, warnings))
 }
 
+pub fn parse_pattern_args(
+    patterns: Vec<Vec<u8>>,
+    cwd: &Path,
+    root: &Path,
+) -> Result<Vec<IgnorePattern>, HgPathError> {
+    let mut ignore_patterns: Vec<IgnorePattern> = Vec::new();
+    for pattern in patterns {
+        let pattern = parse_one_pattern(
+            &pattern,
+            Path::new("<args>"),
+            PatternSyntax::RelPath,
+            true,
+        );
+        match pattern.syntax {
+            PatternSyntax::RelGlob | PatternSyntax::RelPath => {
+                let name = get_path_from_bytes(&pattern.pattern);
+                let canon = canonical_path(root, cwd, name)?;
+                ignore_patterns.push(IgnorePattern {
+                    syntax: pattern.syntax,
+                    pattern: get_bytes_from_path(canon),
+                    source: pattern.source,
+                })
+            }
+            _ => ignore_patterns.push(pattern.to_owned()),
+        };
+    }
+    Ok(ignore_patterns)
+}
+
 pub fn read_pattern_file(
     file_path: &Path,
     warn: bool,
@@ -447,7 +500,7 @@
     match std::fs::read(file_path) {
         Ok(contents) => {
             inspect_pattern_bytes(file_path, &contents);
-            parse_pattern_file_contents(&contents, file_path, None, warn)
+            parse_pattern_file_contents(&contents, file_path, None, warn, true)
         }
         Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok((
             vec![],
@@ -473,6 +526,23 @@
             source: source.to_owned(),
         }
     }
+
+    pub fn to_relative(self) -> Self {
+        let Self {
+            syntax,
+            pattern,
+            source,
+        } = self;
+        Self {
+            syntax: match syntax {
+                PatternSyntax::Regexp => PatternSyntax::RelRegexp,
+                PatternSyntax::Glob => PatternSyntax::RelGlob,
+                x => x,
+            },
+            pattern,
+            source,
+        }
+    }
 }
 
 pub type PatternResult<T> = Result<T, PatternError>;
@@ -559,8 +629,7 @@
             normalize_path_bytes(&get_bytes_from_path(source));
 
         let source_root = get_path_from_bytes(&normalized_source);
-        let source_root =
-            source_root.parent().unwrap_or_else(|| source_root.deref());
+        let source_root = source_root.parent().unwrap_or(source_root);
 
         let path = source_root.join(get_path_from_bytes(pattern));
         let new_root = path.parent().unwrap_or_else(|| path.deref());
@@ -612,22 +681,21 @@
         assert_eq!(escape_pattern(untouched), untouched.to_vec());
         // All escape codes
         assert_eq!(
-            escape_pattern(br#"()[]{}?*+-|^$\\.&~# \t\n\r\v\f"#),
-            br#"\(\)\[\]\{\}\?\*\+\-\|\^\$\\\\\.\&\~\#\ \\t\\n\\r\\v\\f"#
-                .to_vec()
+            escape_pattern(br"()[]{}?*+-|^$\\.&~#\t\n\r\v\f"),
+            br"\(\)\[\]\{\}\?\*\+\-\|\^\$\\\\\.\&\~\#\\t\\n\\r\\v\\f".to_vec()
         );
     }
 
     #[test]
     fn glob_test() {
-        assert_eq!(glob_to_re(br#"?"#), br#"."#);
-        assert_eq!(glob_to_re(br#"*"#), br#"[^/]*"#);
-        assert_eq!(glob_to_re(br#"**"#), br#".*"#);
-        assert_eq!(glob_to_re(br#"**/a"#), br#"(?:.*/)?a"#);
-        assert_eq!(glob_to_re(br#"a/**/b"#), br#"a/(?:.*/)?b"#);
-        assert_eq!(glob_to_re(br#"[a*?!^][^b][!c]"#), br#"[a*?!^][\^b][^c]"#);
-        assert_eq!(glob_to_re(br#"{a,b}"#), br#"(?:a|b)"#);
-        assert_eq!(glob_to_re(br#".\*\?"#), br#"\.\*\?"#);
+        assert_eq!(glob_to_re(br"?"), br".");
+        assert_eq!(glob_to_re(br"*"), br"[^/]*");
+        assert_eq!(glob_to_re(br"**"), br".*");
+        assert_eq!(glob_to_re(br"**/a"), br"(?:.*/)?a");
+        assert_eq!(glob_to_re(br"a/**/b"), br"a/(?:.*/)?b");
+        assert_eq!(glob_to_re(br"[a*?!^][^b][!c]"), br"[a*?!^][\^b][^c]");
+        assert_eq!(glob_to_re(br"{a,b}"), br"(?:a|b)");
+        assert_eq!(glob_to_re(br".\*\?"), br"\.\*\?");
     }
 
     #[test]
@@ -639,7 +707,8 @@
                 lines,
                 Path::new("file_path"),
                 None,
-                false
+                false,
+                true,
             )
             .unwrap()
             .0,
@@ -657,7 +726,8 @@
                 lines,
                 Path::new("file_path"),
                 None,
-                false
+                false,
+                true,
             )
             .unwrap()
             .0,
@@ -669,7 +739,8 @@
                 lines,
                 Path::new("file_path"),
                 None,
-                false
+                false,
+                true,
             )
             .unwrap()
             .0,
@@ -684,20 +755,26 @@
     #[test]
     fn test_build_single_regex() {
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RelGlob,
-                b"rust/target/",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RelGlob,
+                    b"rust/target/",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             Some(br"(?:.*/)?rust/target(?:/|$)".to_vec()),
         );
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::Regexp,
-                br"rust/target/\d+",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::Regexp,
+                    br"rust/target/\d+",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             Some(br"rust/target/\d+".to_vec()),
         );
@@ -706,29 +783,38 @@
     #[test]
     fn test_build_single_regex_shortcut() {
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RootGlob,
-                b"",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RootGlob,
+                    b"",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             None,
         );
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RootGlob,
-                b"whatever",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RootGlob,
+                    b"whatever",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             None,
         );
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RootGlob,
-                b"*.o",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RootGlob,
+                    b"*.o",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             Some(br"[^/]*\.o(?:/|$)".to_vec()),
         );
@@ -737,38 +823,50 @@
     #[test]
     fn test_build_single_relregex() {
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RelRegexp,
-                b"^ba{2}r",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RelRegexp,
+                    b"^ba{2}r",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             Some(b"^ba{2}r".to_vec()),
         );
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RelRegexp,
-                b"ba{2}r",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RelRegexp,
+                    b"ba{2}r",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             Some(b".*ba{2}r".to_vec()),
         );
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RelRegexp,
-                b"(?ia)ba{2}r",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RelRegexp,
+                    b"(?ia)ba{2}r",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             Some(b"(?ia:.*ba{2}r)".to_vec()),
         );
         assert_eq!(
-            build_single_regex(&IgnorePattern::new(
-                PatternSyntax::RelRegexp,
-                b"(?ia)^ba{2}r",
-                Path::new("")
-            ))
+            build_single_regex(
+                &IgnorePattern::new(
+                    PatternSyntax::RelRegexp,
+                    b"(?ia)^ba{2}r",
+                    Path::new("")
+                ),
+                b"(?:/|$)"
+            )
             .unwrap(),
             Some(b"(?ia:^ba{2}r)".to_vec()),
         );
--- a/rust/hg-core/src/lib.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/lib.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -25,7 +25,7 @@
     DirstateEntry, DirstateParents, EntryState,
 };
 pub mod copy_tracing;
-mod filepatterns;
+pub mod filepatterns;
 pub mod matchers;
 pub mod repo;
 pub mod revlog;
@@ -66,6 +66,12 @@
     InvalidPath(HgPathError),
 }
 
+impl From<HgPathError> for DirstateMapError {
+    fn from(error: HgPathError) -> Self {
+        Self::InvalidPath(error)
+    }
+}
+
 impl fmt::Display for DirstateMapError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
@@ -83,6 +89,12 @@
     Common(errors::HgError),
 }
 
+impl From<HgPathError> for DirstateError {
+    fn from(error: HgPathError) -> Self {
+        Self::Map(DirstateMapError::InvalidPath(error))
+    }
+}
+
 impl fmt::Display for DirstateError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self {
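
These `From` impls are what let the call sites shrink: `?` now converts an `HgPathError` into the wider error automatically, removing the hand-written `map_err` plumbing seen in the matchers.rs hunks below. A self-contained illustration with stand-in types:

```
#[derive(Debug)]
struct HgPathError(String);

#[derive(Debug)]
enum DirstateMapError {
    InvalidPath(HgPathError),
}

impl From<HgPathError> for DirstateMapError {
    fn from(error: HgPathError) -> Self {
        Self::InvalidPath(error)
    }
}

fn check_path(path: &str) -> Result<(), HgPathError> {
    if path.contains("//") {
        return Err(HgPathError(format!("consecutive slashes in {path:?}")));
    }
    Ok(())
}

fn add_to_map(path: &str) -> Result<(), DirstateMapError> {
    // `?` applies the `From` impl, converting the narrow error into the
    // wide one without an explicit `map_err`.
    check_path(path)?;
    Ok(())
}

fn main() {
    assert!(add_to_map("a/b").is_ok());
    assert!(matches!(
        add_to_map("a//b"),
        Err(DirstateMapError::InvalidPath(_))
    ));
}
```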
--- a/rust/hg-core/src/matchers.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/matchers.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -7,6 +7,9 @@
 
 //! Structs and types for matching files and directories.
 
+use format_bytes::format_bytes;
+use once_cell::sync::OnceCell;
+
 use crate::{
     dirstate::dirs_multiset::DirsChildrenMultiset,
     filepatterns::{
@@ -15,20 +18,18 @@
     },
     utils::{
         files::find_dirs,
-        hg_path::{HgPath, HgPathBuf},
+        hg_path::{HgPath, HgPathBuf, HgPathError},
         Escaped,
     },
-    DirsMultiset, DirstateMapError, FastHashMap, IgnorePattern, PatternError,
-    PatternSyntax,
+    DirsMultiset, FastHashMap, IgnorePattern, PatternError, PatternSyntax,
 };
 
 use crate::dirstate::status::IgnoreFnType;
 use crate::filepatterns::normalize_path_bytes;
-use std::borrow::ToOwned;
 use std::collections::HashSet;
 use std::fmt::{Display, Error, Formatter};
-use std::ops::Deref;
 use std::path::{Path, PathBuf};
+use std::{borrow::ToOwned, collections::BTreeSet};
 
 #[derive(Debug, PartialEq)]
 pub enum VisitChildrenSet {
@@ -174,14 +175,16 @@
 pub struct FileMatcher {
     files: HashSet<HgPathBuf>,
     dirs: DirsMultiset,
+    sorted_visitchildrenset_candidates: OnceCell<BTreeSet<HgPathBuf>>,
 }
 
 impl FileMatcher {
-    pub fn new(files: Vec<HgPathBuf>) -> Result<Self, DirstateMapError> {
+    pub fn new(files: Vec<HgPathBuf>) -> Result<Self, HgPathError> {
         let dirs = DirsMultiset::from_manifest(&files)?;
         Ok(Self {
-            files: HashSet::from_iter(files.into_iter()),
+            files: HashSet::from_iter(files),
             dirs,
+            sorted_visitchildrenset_candidates: OnceCell::new(),
         })
     }
     fn inner_matches(&self, filename: &HgPath) -> bool {
@@ -200,30 +203,34 @@
         self.inner_matches(filename)
     }
     fn visit_children_set(&self, directory: &HgPath) -> VisitChildrenSet {
-        if self.files.is_empty() || !self.dirs.contains(&directory) {
+        if self.files.is_empty() || !self.dirs.contains(directory) {
             return VisitChildrenSet::Empty;
         }
-        let mut candidates: HashSet<HgPathBuf> =
-            self.dirs.iter().cloned().collect();
 
-        candidates.extend(self.files.iter().cloned());
-        candidates.remove(HgPath::new(b""));
-
-        if !directory.as_ref().is_empty() {
-            let directory = [directory.as_ref().as_bytes(), b"/"].concat();
-            candidates = candidates
-                .iter()
-                .filter_map(|c| {
-                    if c.as_bytes().starts_with(&directory) {
-                        Some(HgPathBuf::from_bytes(
-                            &c.as_bytes()[directory.len()..],
-                        ))
-                    } else {
-                        None
-                    }
-                })
-                .collect();
-        }
+        let compute_candidates = || -> BTreeSet<HgPathBuf> {
+            let mut candidates: BTreeSet<HgPathBuf> =
+                self.dirs.iter().cloned().collect();
+            candidates.extend(self.files.iter().cloned());
+            candidates.remove(HgPath::new(b""));
+            candidates
+        };
+        let candidates =
+            if directory.as_ref().is_empty() {
+                compute_candidates()
+            } else {
+                let sorted_candidates = self
+                    .sorted_visitchildrenset_candidates
+                    .get_or_init(compute_candidates);
+                let directory_bytes = directory.as_ref().as_bytes();
+                let start: HgPathBuf =
+                    format_bytes!(b"{}/", directory_bytes).into();
+                let start_len = start.len();
+                // `0` sorts after `/`
+                let end = format_bytes!(b"{}0", directory_bytes).into();
+                BTreeSet::from_iter(sorted_candidates.range(start..end).map(
+                    |c| HgPathBuf::from_bytes(&c.as_bytes()[start_len..]),
+                ))
+            };
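
The `start..end` bounds work because `b'0'` (0x30) is the first byte value after `b'/'` (0x2f), so the half-open range covers exactly the keys prefixed with `directory/`, and the cached `BTreeSet` answers it in logarithmic time instead of the old filter over the whole set. A quick standalone check of the trick:

```
use std::collections::BTreeSet;

fn main() {
    // ["dir/", "dir0") selects exactly the keys under "dir/".
    let candidates: BTreeSet<Vec<u8>> = BTreeSet::from([
        b"dir/a.txt".to_vec(),
        b"dir/sub".to_vec(),
        b"dir0dd".to_vec(),
        b"other".to_vec(),
    ]);
    let under: Vec<&[u8]> = candidates
        .range(b"dir/".to_vec()..b"dir0".to_vec())
        .map(|p| p.as_slice())
        .collect();
    assert_eq!(under, vec![&b"dir/a.txt"[..], &b"dir/sub"[..]]);
}
```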
 
         // `self.dirs` includes all of the directories, recursively, so if
         // we're attempting to match 'foo/bar/baz.txt', it'll have '', 'foo',
@@ -251,6 +258,118 @@
     }
 }
 
+/// Matches a set of (kind, pat, source) against a 'root' directory.
+/// (Currently the 'root' directory is effectively always empty)
+/// ```
+/// use hg::{
+///     matchers::{PatternMatcher, Matcher},
+///     IgnorePattern,
+///     PatternSyntax,
+///     utils::hg_path::{HgPath, HgPathBuf}
+/// };
+/// use std::collections::HashSet;
+/// use std::path::Path;
+/// ///
+/// let ignore_patterns: Vec<IgnorePattern> =
+///     vec![IgnorePattern::new(PatternSyntax::Regexp, br".*\.c$", Path::new("")),
+///          IgnorePattern::new(PatternSyntax::Path, b"foo/a", Path::new("")),
+///          IgnorePattern::new(PatternSyntax::RelPath, b"b", Path::new("")),
+///          IgnorePattern::new(PatternSyntax::Glob, b"*.h", Path::new("")),
+///     ];
+/// let matcher = PatternMatcher::new(ignore_patterns).unwrap();
+/// ///
+/// assert_eq!(matcher.matches(HgPath::new(b"main.c")), true); // matches re:.*\.c$
+/// assert_eq!(matcher.matches(HgPath::new(b"b.txt")), false);
+/// assert_eq!(matcher.matches(HgPath::new(b"foo/a")), true); // matches path:foo/a
+/// assert_eq!(matcher.matches(HgPath::new(b"a")), false); // does not match path:b, since 'root' is 'foo'
+/// assert_eq!(matcher.matches(HgPath::new(b"b")), true); // matches relpath:b, since 'root' is 'foo'
+/// assert_eq!(matcher.matches(HgPath::new(b"lib.h")), true); // matches glob:*.h
+/// assert_eq!(matcher.file_set().unwrap(),
+///            &HashSet::from([HgPathBuf::from_bytes(b""), HgPathBuf::from_bytes(b"foo/a"),
+///                            HgPathBuf::from_bytes(b""), HgPathBuf::from_bytes(b"b")]));
+/// assert_eq!(matcher.exact_match(HgPath::new(b"foo/a")), true);
+/// assert_eq!(matcher.exact_match(HgPath::new(b"b")), true);
+/// assert_eq!(matcher.exact_match(HgPath::new(b"lib.h")), false); // exact matches are for (rel)path kinds
+/// ```
+pub struct PatternMatcher<'a> {
+    patterns: Vec<u8>,
+    match_fn: IgnoreFnType<'a>,
+    /// Whether all the patterns match a prefix (i.e. recursively)
+    prefix: bool,
+    files: HashSet<HgPathBuf>,
+    dirs: DirsMultiset,
+}
+
+impl core::fmt::Debug for PatternMatcher<'_> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("PatternMatcher")
+            .field("patterns", &String::from_utf8_lossy(&self.patterns))
+            .field("prefix", &self.prefix)
+            .field("files", &self.files)
+            .field("dirs", &self.dirs)
+            .finish()
+    }
+}
+
+impl<'a> PatternMatcher<'a> {
+    pub fn new(ignore_patterns: Vec<IgnorePattern>) -> PatternResult<Self> {
+        let (files, _) = roots_and_dirs(&ignore_patterns);
+        let dirs = DirsMultiset::from_manifest(&files)?;
+        let files: HashSet<HgPathBuf> = HashSet::from_iter(files);
+
+        let prefix = ignore_patterns.iter().all(|k| {
+            matches!(k.syntax, PatternSyntax::Path | PatternSyntax::RelPath)
+        });
+        let (patterns, match_fn) = build_match(ignore_patterns, b"$")?;
+
+        Ok(Self {
+            patterns,
+            match_fn,
+            prefix,
+            files,
+            dirs,
+        })
+    }
+}
+
+impl<'a> Matcher for PatternMatcher<'a> {
+    fn file_set(&self) -> Option<&HashSet<HgPathBuf>> {
+        Some(&self.files)
+    }
+
+    fn exact_match(&self, filename: &HgPath) -> bool {
+        self.files.contains(filename)
+    }
+
+    fn matches(&self, filename: &HgPath) -> bool {
+        if self.files.contains(filename) {
+            return true;
+        }
+        (self.match_fn)(filename)
+    }
+
+    fn visit_children_set(&self, directory: &HgPath) -> VisitChildrenSet {
+        if self.prefix && self.files.contains(directory) {
+            return VisitChildrenSet::Recursive;
+        }
+        let path_or_parents_in_set = find_dirs(directory)
+            .any(|parent_dir| self.files.contains(parent_dir));
+        if self.dirs.contains(directory) || path_or_parents_in_set {
+            VisitChildrenSet::This
+        } else {
+            VisitChildrenSet::Empty
+        }
+    }
+
+    fn matches_everything(&self) -> bool {
+        false
+    }
+
+    fn is_exact(&self) -> bool {
+        false
+    }
+}
+
 /// Matches files that are included in the ignore rules.
 /// ```
 /// use hg::{
@@ -479,7 +598,13 @@
                 m1_files.iter().cloned().filter(|f| m2.matches(f)).collect()
             })
         } else {
-            None
+            // without exact input file sets, we can't do an exact
+            // intersection, so we must over-approximate by
+            // unioning instead
+            m1.file_set().map(|m1_files| match m2.file_set() {
+                Some(m2_files) => m1_files.union(m2_files).cloned().collect(),
+                None => m1_files.iter().cloned().collect(),
+            })
         };
         Self { m1, m2, files }
     }
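
The union here is deliberately loose: every path the intersection can match appears in at least one operand's file set, so a union never misses a candidate, and false candidates are rejected later by `matches`. A small demonstration of that superset property:

```
use std::collections::HashSet;

fn main() {
    let m1_files: HashSet<&str> = HashSet::from(["a", "b"]);
    let m2_files: HashSet<&str> = HashSet::from(["b", "c"]);
    // Union never misses a path the intersection could match...
    let candidates: HashSet<&str> =
        m1_files.union(&m2_files).copied().collect();
    let exact: HashSet<&str> =
        m1_files.intersection(&m2_files).copied().collect();
    assert!(candidates.is_superset(&exact));
    // ...but may keep extras like "a", pruned later at match time.
    assert!(candidates.contains("a"));
}
```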
@@ -647,14 +772,15 @@
 
 /// Returns the regex pattern and a function that matches an `HgPath` against
 /// said regex formed by the given ignore patterns.
-fn build_regex_match<'a, 'b>(
-    ignore_patterns: &'a [IgnorePattern],
-) -> PatternResult<(Vec<u8>, IgnoreFnType<'b>)> {
+fn build_regex_match<'a>(
+    ignore_patterns: &[IgnorePattern],
+    glob_suffix: &[u8],
+) -> PatternResult<(Vec<u8>, IgnoreFnType<'a>)> {
     let mut regexps = vec![];
     let mut exact_set = HashSet::new();
 
     for pattern in ignore_patterns {
-        if let Some(re) = build_single_regex(pattern)? {
+        if let Some(re) = build_single_regex(pattern, glob_suffix)? {
             regexps.push(re);
         } else {
             let exact = normalize_path_bytes(&pattern.pattern);
@@ -754,20 +880,12 @@
     let mut parents = HashSet::new();
 
     parents.extend(
-        DirsMultiset::from_manifest(&dirs)
-            .map_err(|e| match e {
-                DirstateMapError::InvalidPath(e) => e,
-                _ => unreachable!(),
-            })?
+        DirsMultiset::from_manifest(&dirs)?
             .iter()
             .map(ToOwned::to_owned),
     );
     parents.extend(
-        DirsMultiset::from_manifest(&roots)
-            .map_err(|e| match e {
-                DirstateMapError::InvalidPath(e) => e,
-                _ => unreachable!(),
-            })?
+        DirsMultiset::from_manifest(&roots)?
             .iter()
             .map(ToOwned::to_owned),
     );
@@ -783,6 +901,7 @@
 /// should be matched.
 fn build_match<'a>(
     ignore_patterns: Vec<IgnorePattern>,
+    glob_suffix: &[u8],
 ) -> PatternResult<(Vec<u8>, IgnoreFnType<'a>)> {
     let mut match_funcs: Vec<IgnoreFnType<'a>> = vec![];
     // For debugging and printing
@@ -838,7 +957,7 @@
                 } else {
                     b"."
                 };
-                dirs.contains(dir.deref())
+                dirs.contains(dir)
             };
             match_funcs.push(Box::new(match_func));
 
@@ -846,7 +965,8 @@
             dirs_vec.sort();
             patterns.extend(dirs_vec.escaped_bytes());
         } else {
-            let (new_re, match_func) = build_regex_match(&ignore_patterns)?;
+            let (new_re, match_func) =
+                build_regex_match(&ignore_patterns, glob_suffix)?;
             patterns = new_re;
             match_funcs.push(match_func)
         }
@@ -925,7 +1045,7 @@
         let prefix = ignore_patterns.iter().all(|k| {
             matches!(k.syntax, PatternSyntax::Path | PatternSyntax::RelPath)
         });
-        let (patterns, match_fn) = build_match(ignore_patterns)?;
+        let (patterns, match_fn) = build_match(ignore_patterns, b"(?:/|$)")?;
 
         Ok(Self {
             patterns,
@@ -1122,6 +1242,242 @@
     }
 
     #[test]
+    fn test_patternmatcher() {
+        // VisitdirPrefix
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::Path,
+            b"dir/subdir",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::Recursive
+        );
+        // OPT: This should probably be Recursive if its parent is Recursive.
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+
+        // VisitchildrensetPrefix
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::Path,
+            b"dir/subdir",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::Recursive
+        );
+        // OPT: This should probably be Recursive if its parent is Recursive.
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+
+        // VisitdirRootfilesin
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::RootFiles,
+            b"dir/subdir",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+        // FIXME: These should probably be This.
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::Empty
+        );
+
+        // VisitchildrensetRootfilesin
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::RootFiles,
+            b"dir/subdir",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+        // FIXME: These should probably be {'dir'}, {'subdir'} and This,
+        // respectively, or at least This for all three.
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::Empty
+        );
+
+        // VisitdirGlob
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::Glob,
+            b"dir/z*",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::This
+        );
+        // FIXME: This should probably be VisitChildrenSet::This.
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+        // OPT: these should probably be Empty.
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::This
+        );
+
+        // VisitchildrensetGlob
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::Glob,
+            b"dir/z*",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+        // FIXME: This should probably be VisitChildrenSet::This.
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::Empty
+        );
+        // OPT: these should probably be Empty
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::This
+        );
+
+        // VisitdirFilepath
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::FilePath,
+            b"dir/z",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::Empty
+        );
+
+        // VisitchildrensetFilepath
+        let m = PatternMatcher::new(vec![IgnorePattern::new(
+            PatternSyntax::FilePath,
+            b"dir/z",
+            Path::new(""),
+        )])
+        .unwrap();
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"folder")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir")),
+            VisitChildrenSet::This
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir")),
+            VisitChildrenSet::Empty
+        );
+        assert_eq!(
+            m.visit_children_set(HgPath::new(b"dir/subdir/x")),
+            VisitChildrenSet::Empty
+        );
+    }
+
+    #[test]
     fn test_includematcher() {
         // VisitchildrensetPrefix
         let matcher = IncludeMatcher::new(vec![IgnorePattern::new(
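A minimal, self-contained sketch (toy types and functions, not hg-core's API) of how a walker consumes the `VisitChildrenSet` answers exercised in the tests above: `Empty` prunes a subtree, `This` keeps querying the matcher one level down, and `Recursive` stops querying below that point.

#[derive(Clone, Copy, Debug, PartialEq)]
enum VisitChildrenSet {
    Empty,
    This,
    Recursive,
}

fn walk(
    dir: &str,
    children: &dyn Fn(&str) -> Vec<String>,
    visit: &dyn Fn(&str) -> VisitChildrenSet,
    out: &mut Vec<String>,
) {
    out.push(dir.to_string());
    match visit(dir) {
        // Nothing under `dir` can match: skip the whole subtree.
        VisitChildrenSet::Empty => {}
        // Everything below matches: no further matcher queries needed.
        VisitChildrenSet::Recursive => {
            for child in children(dir) {
                walk(&child, children, &|_| VisitChildrenSet::Recursive, out);
            }
        }
        // Some children may match: recurse and ask again at each level.
        VisitChildrenSet::This => {
            for child in children(dir) {
                walk(&child, children, visit, out);
            }
        }
    }
}

fn main() {
    // Toy directory tree, roughly matching the tests above.
    let tree = |d: &str| -> Vec<String> {
        match d {
            "" => vec!["dir".into(), "folder".into()],
            "dir" => vec!["dir/subdir".into()],
            "dir/subdir" => vec!["dir/subdir/x".into()],
            "folder" => vec!["folder/sub".into()],
            _ => vec![],
        }
    };
    // Mimics the answers for the `Path` pattern `dir/subdir` above.
    let visit = |d: &str| match d {
        "" | "dir" => VisitChildrenSet::This,
        d if d.starts_with("dir/subdir") => VisitChildrenSet::Recursive,
        _ => VisitChildrenSet::Empty,
    };
    let mut out = vec![];
    walk("", &tree, &visit, &mut out);
    // `folder/sub` is never listed: the walk was pruned at `folder`.
    assert_eq!(out, ["", "dir", "dir/subdir", "dir/subdir/x", "folder"]);
}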
--- a/rust/hg-core/src/narrow.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/narrow.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -74,6 +74,7 @@
         Path::new(""),
         None,
         false,
+        true,
     )?;
     warnings.extend(subwarnings.into_iter().map(From::from));
 
@@ -85,6 +86,7 @@
         Path::new(""),
         None,
         false,
+        true,
     )?;
     if !patterns.is_empty() {
         warnings.extend(subwarnings.into_iter().map(From::from));
--- a/rust/hg-core/src/operations/cat.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/operations/cat.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -84,10 +84,10 @@
     mut files: Vec<&'a HgPath>,
 ) -> Result<CatOutput<'a>, RevlogError> {
     let rev = crate::revset::resolve_single(revset, repo)?;
-    let manifest = repo.manifest_for_rev(rev)?;
+    let manifest = repo.manifest_for_rev(rev.into())?;
     let node = *repo
         .changelog()?
-        .node_from_rev(rev)
+        .node_from_rev(rev.into())
         .expect("should succeed when repo.manifest did");
     let mut results: Vec<(&'a HgPath, Vec<u8>)> = vec![];
     let mut found_any = false;
--- a/rust/hg-core/src/operations/debugdata.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/operations/debugdata.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -33,6 +33,6 @@
         Revlog::open(&repo.store_vfs(), index_file, None, use_nodemap)?;
     let rev =
         crate::revset::resolve_rev_number_or_hex_prefix(revset, &revlog)?;
-    let data = revlog.get_rev_data(rev)?;
+    let data = revlog.get_rev_data_for_checked_rev(rev)?;
     Ok(data.into_owned())
 }
--- a/rust/hg-core/src/operations/list_tracked_files.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/operations/list_tracked_files.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -21,7 +21,7 @@
 ) -> Result<FilesForRev, RevlogError> {
     let rev = crate::revset::resolve_single(revset, repo)?;
     Ok(FilesForRev {
-        manifest: repo.manifest_for_rev(rev)?,
+        manifest: repo.manifest_for_rev(rev.into())?,
         narrow_matcher,
     })
 }
--- a/rust/hg-core/src/repo.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/repo.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -15,8 +15,8 @@
 use crate::utils::hg_path::HgPath;
 use crate::utils::SliceExt;
 use crate::vfs::{is_dir, is_file, Vfs};
-use crate::{requirements, NodePrefix};
-use crate::{DirstateError, Revision};
+use crate::DirstateError;
+use crate::{requirements, NodePrefix, UncheckedRevision};
 use std::cell::{Ref, RefCell, RefMut};
 use std::collections::HashSet;
 use std::io::Seek;
@@ -562,7 +562,7 @@
     /// Returns the manifest of the *changeset* with the given revision number
     pub fn manifest_for_rev(
         &self,
-        revision: Revision,
+        revision: UncheckedRevision,
     ) -> Result<Manifest, RevlogError> {
         self.manifestlog()?.data_for_node(
             self.changelog()?
@@ -686,7 +686,7 @@
                 }
                 file.write_all(&data)?;
                 file.flush()?;
-                file.seek(SeekFrom::Current(0))
+                file.stream_position()
             })()
             .when_writing_file(&data_filename)?;
 
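The `stream_position()` call replacing `seek(SeekFrom::Current(0))` is the std idiom for querying the current offset without moving it (stable since Rust 1.51). A quick stand-alone check of the equivalence:

use std::io::{Cursor, Seek, SeekFrom, Write};

fn main() -> std::io::Result<()> {
    let mut file = Cursor::new(Vec::new());
    file.write_all(b"some revlog data")?;
    // `stream_position()` reads the current offset without moving it...
    let pos = file.stream_position()?;
    // ...and is defined to equal the old spelling:
    assert_eq!(pos, file.seek(SeekFrom::Current(0))?);
    assert_eq!(pos, 16);
    Ok(())
}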
--- a/rust/hg-core/src/revlog/changelog.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/changelog.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -4,6 +4,7 @@
 use crate::revlog::{Revlog, RevlogEntry, RevlogError};
 use crate::utils::hg_path::HgPath;
 use crate::vfs::Vfs;
+use crate::{Graph, GraphError, UncheckedRevision};
 use itertools::Itertools;
 use std::ascii::escape_default;
 use std::borrow::Cow;
@@ -29,15 +30,24 @@
         node: NodePrefix,
     ) -> Result<ChangelogRevisionData, RevlogError> {
         let rev = self.revlog.rev_from_node(node)?;
-        self.data_for_rev(rev)
+        self.entry_for_checked_rev(rev)?.data()
     }
 
     /// Return the [`ChangelogEntry`] for the given revision number.
     pub fn entry_for_rev(
         &self,
+        rev: UncheckedRevision,
+    ) -> Result<ChangelogEntry, RevlogError> {
+        let revlog_entry = self.revlog.get_entry(rev)?;
+        Ok(ChangelogEntry { revlog_entry })
+    }
+
+    /// Same as [`Self::entry_for_rev`] for checked revisions.
+    fn entry_for_checked_rev(
+        &self,
         rev: Revision,
     ) -> Result<ChangelogEntry, RevlogError> {
-        let revlog_entry = self.revlog.get_entry(rev)?;
+        let revlog_entry = self.revlog.get_entry_for_checked_rev(rev)?;
         Ok(ChangelogEntry { revlog_entry })
     }
 
@@ -49,12 +59,12 @@
     /// [entry_for_rev](`Self::entry_for_rev`) and doing everything from there.
     pub fn data_for_rev(
         &self,
-        rev: Revision,
+        rev: UncheckedRevision,
     ) -> Result<ChangelogRevisionData, RevlogError> {
         self.entry_for_rev(rev)?.data()
     }
 
-    pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
+    pub fn node_from_rev(&self, rev: UncheckedRevision) -> Option<&Node> {
         self.revlog.node_from_rev(rev)
     }
 
@@ -66,6 +76,12 @@
     }
 }
 
+impl Graph for Changelog {
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+        self.revlog.parents(rev)
+    }
+}
+
 /// A specialized `RevlogEntry` for `changelog` data format
 ///
 /// This is a `RevlogEntry` with the added semantics that the associated
@@ -330,12 +346,12 @@
 
         let changelog = Changelog { revlog };
         assert_eq!(
-            changelog.data_for_rev(NULL_REVISION)?,
+            changelog.data_for_rev(NULL_REVISION.into())?,
             ChangelogRevisionData::null()
         );
         // same with the intermediate entry object
         assert_eq!(
-            changelog.entry_for_rev(NULL_REVISION)?.data()?,
+            changelog.entry_for_rev(NULL_REVISION.into())?.data()?,
             ChangelogRevisionData::null()
         );
         Ok(())
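The shape of this `Changelog` change recurs in `Filelog` and `Manifestlog` below: the public method takes an `UncheckedRevision` and validates it exactly once, while a private `*_for_checked_rev` twin trusts the `Revision` type. A minimal stand-alone model of that pattern (toy types and return values, not the real signatures):

#[derive(Clone, Copy)]
struct Revision(i32);
#[derive(Clone, Copy)]
struct UncheckedRevision(i32);

impl From<Revision> for UncheckedRevision {
    fn from(r: Revision) -> Self {
        Self(r.0)
    }
}

struct Changelog {
    len: usize,
}

impl Changelog {
    // Public boundary: accepts anything, validates exactly once.
    fn entry_for_rev(&self, rev: UncheckedRevision) -> Result<i32, String> {
        let rev = self
            .check_revision(rev)
            .ok_or_else(|| format!("rev {} is invalid", rev.0))?;
        self.entry_for_checked_rev(rev)
    }

    // Private twin: the `Revision` type proves validation already happened.
    fn entry_for_checked_rev(&self, rev: Revision) -> Result<i32, String> {
        Ok(rev.0)
    }

    fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
        let r = rev.0;
        if r == -1 || (r >= 0 && (r as usize) < self.len) {
            Some(Revision(r))
        } else {
            None
        }
    }
}

fn main() {
    let log = Changelog { len: 3 };
    assert!(log.entry_for_rev(UncheckedRevision(2)).is_ok());
    assert!(log.entry_for_rev(UncheckedRevision(3)).is_err());
    // Checked revisions convert back freely, as in `NULL_REVISION.into()`.
    assert!(log.entry_for_rev(Revision(-1).into()).is_ok());
}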
--- a/rust/hg-core/src/revlog/filelog.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/filelog.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -1,4 +1,5 @@
 use crate::errors::HgError;
+use crate::exit_codes;
 use crate::repo::Repo;
 use crate::revlog::path_encode::path_encode;
 use crate::revlog::NodePrefix;
@@ -8,6 +9,9 @@
 use crate::utils::files::get_path_from_bytes;
 use crate::utils::hg_path::HgPath;
 use crate::utils::SliceExt;
+use crate::Graph;
+use crate::GraphError;
+use crate::UncheckedRevision;
 use std::path::PathBuf;
 
 /// A specialized `Revlog` to work with file data logs.
@@ -16,6 +20,12 @@
     revlog: Revlog,
 }
 
+impl Graph for Filelog {
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+        self.revlog.parents(rev)
+    }
+}
+
 impl Filelog {
     pub fn open_vfs(
         store_vfs: &crate::vfs::Vfs<'_>,
@@ -39,14 +49,14 @@
         file_node: impl Into<NodePrefix>,
     ) -> Result<FilelogRevisionData, RevlogError> {
         let file_rev = self.revlog.rev_from_node(file_node.into())?;
-        self.data_for_rev(file_rev)
+        self.data_for_rev(file_rev.into())
     }
 
     /// The given revision is that of the file as found in a filelog, not of a
     /// changeset.
     pub fn data_for_rev(
         &self,
-        file_rev: Revision,
+        file_rev: UncheckedRevision,
     ) -> Result<FilelogRevisionData, RevlogError> {
         let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned();
         Ok(FilelogRevisionData(data))
@@ -59,16 +69,25 @@
         file_node: impl Into<NodePrefix>,
     ) -> Result<FilelogEntry, RevlogError> {
         let file_rev = self.revlog.rev_from_node(file_node.into())?;
-        self.entry_for_rev(file_rev)
+        self.entry_for_checked_rev(file_rev)
     }
 
     /// The given revision is that of the file as found in a filelog, not of a
     /// changeset.
     pub fn entry_for_rev(
         &self,
+        file_rev: UncheckedRevision,
+    ) -> Result<FilelogEntry, RevlogError> {
+        Ok(FilelogEntry(self.revlog.get_entry(file_rev)?))
+    }
+
+    fn entry_for_checked_rev(
+        &self,
         file_rev: Revision,
     ) -> Result<FilelogEntry, RevlogError> {
-        Ok(FilelogEntry(self.revlog.get_entry(file_rev)?))
+        Ok(FilelogEntry(
+            self.revlog.get_entry_for_checked_rev(file_rev)?,
+        ))
     }
 }
 
@@ -165,7 +184,19 @@
     }
 
     pub fn data(&self) -> Result<FilelogRevisionData, HgError> {
-        Ok(FilelogRevisionData(self.0.data()?.into_owned()))
+        let data = self.0.data();
+        match data {
+            Ok(data) => Ok(FilelogRevisionData(data.into_owned())),
+            // Errors other than `HgError` should not happen at this point
+            Err(e) => match e {
+                RevlogError::Other(hg_error) => Err(hg_error),
+                revlog_error => Err(HgError::abort(
+                    revlog_error.to_string(),
+                    exit_codes::ABORT,
+                    None,
+                )),
+            },
+        }
     }
 }
 
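A condensed model of the error funnel introduced in `FilelogEntry::data` above (toy enums standing in for hg-core's `HgError` and `RevlogError`): the one expected variant is unwrapped as-is, anything else is escalated to an abort-style error.

#[derive(Debug)]
enum HgError {
    Abort(String),
}
#[derive(Debug)]
enum RevlogError {
    Other(HgError),
    InvalidRevision,
}

fn data(res: Result<Vec<u8>, RevlogError>) -> Result<Vec<u8>, HgError> {
    match res {
        Ok(data) => Ok(data),
        // The expected wrapper variant is unwrapped as-is...
        Err(RevlogError::Other(hg_error)) => Err(hg_error),
        // ...anything else should not happen at this point and aborts.
        Err(revlog_error) => Err(HgError::Abort(format!("{:?}", revlog_error))),
    }
}

fn main() {
    assert!(matches!(
        data(Err(RevlogError::InvalidRevision)),
        Err(HgError::Abort(_))
    ));
    assert_eq!(data(Ok(b"contents".to_vec())).unwrap(), b"contents");
}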
--- a/rust/hg-core/src/revlog/index.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/index.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -1,3 +1,4 @@
+use std::fmt::Debug;
 use std::ops::Deref;
 
 use byteorder::{BigEndian, ByteOrder};
@@ -5,6 +6,7 @@
 use crate::errors::HgError;
 use crate::revlog::node::Node;
 use crate::revlog::{Revision, NULL_REVISION};
+use crate::{Graph, GraphError, RevlogIndex, UncheckedRevision};
 
 pub const INDEX_ENTRY_SIZE: usize = 64;
 
@@ -86,6 +88,32 @@
     uses_generaldelta: bool,
 }
 
+impl Debug for Index {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("Index")
+            .field("offsets", &self.offsets)
+            .field("uses_generaldelta", &self.uses_generaldelta)
+            .finish()
+    }
+}
+
+impl Graph for Index {
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+        let err = || GraphError::ParentOutOfRange(rev);
+        match self.get_entry(rev) {
+            Some(entry) => {
+                // The C implementation checks that the parents are valid
+                // before returning
+                Ok([
+                    self.check_revision(entry.p1()).ok_or_else(err)?,
+                    self.check_revision(entry.p2()).ok_or_else(err)?,
+                ])
+            }
+            None => Ok([NULL_REVISION, NULL_REVISION]),
+        }
+    }
+}
+
 impl Index {
     /// Create an index from bytes.
     /// Calculate the start of each entry when is_inline is true.
@@ -175,48 +203,44 @@
         if rev == NULL_REVISION {
             return None;
         }
-        if let Some(offsets) = &self.offsets {
+        Some(if let Some(offsets) = &self.offsets {
             self.get_entry_inline(rev, offsets)
         } else {
             self.get_entry_separated(rev)
-        }
+        })
     }
 
     fn get_entry_inline(
         &self,
         rev: Revision,
         offsets: &[usize],
-    ) -> Option<IndexEntry> {
-        let start = *offsets.get(rev as usize)?;
-        let end = start.checked_add(INDEX_ENTRY_SIZE)?;
+    ) -> IndexEntry {
+        let start = offsets[rev.0 as usize];
+        let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
         // See IndexEntry for an explanation of this override.
         let offset_override = Some(end);
 
-        Some(IndexEntry {
+        IndexEntry {
             bytes,
             offset_override,
-        })
+        }
     }
 
-    fn get_entry_separated(&self, rev: Revision) -> Option<IndexEntry> {
-        let max_rev = self.bytes.len() / INDEX_ENTRY_SIZE;
-        if rev as usize >= max_rev {
-            return None;
-        }
-        let start = rev as usize * INDEX_ENTRY_SIZE;
+    fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
+        let start = rev.0 as usize * INDEX_ENTRY_SIZE;
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
         // Override the offset of the first revision as its bytes are used
         // for the index's metadata (saving space because it is always 0)
-        let offset_override = if rev == 0 { Some(0) } else { None };
+        let offset_override = if rev == Revision(0) { Some(0) } else { None };
 
-        Some(IndexEntry {
+        IndexEntry {
             bytes,
             offset_override,
-        })
+        }
     }
 }
 
@@ -273,23 +297,23 @@
     }
 
     /// Return the revision upon which the data has been derived.
-    pub fn base_revision_or_base_of_delta_chain(&self) -> Revision {
+    pub fn base_revision_or_base_of_delta_chain(&self) -> UncheckedRevision {
         // TODO Maybe return an Option when base_revision == rev?
         //      Requires adding rev to IndexEntry
 
-        BigEndian::read_i32(&self.bytes[16..])
+        BigEndian::read_i32(&self.bytes[16..]).into()
     }
 
-    pub fn link_revision(&self) -> Revision {
-        BigEndian::read_i32(&self.bytes[20..])
+    pub fn link_revision(&self) -> UncheckedRevision {
+        BigEndian::read_i32(&self.bytes[20..]).into()
     }
 
-    pub fn p1(&self) -> Revision {
-        BigEndian::read_i32(&self.bytes[24..])
+    pub fn p1(&self) -> UncheckedRevision {
+        BigEndian::read_i32(&self.bytes[24..]).into()
     }
 
-    pub fn p2(&self) -> Revision {
-        BigEndian::read_i32(&self.bytes[28..])
+    pub fn p2(&self) -> UncheckedRevision {
+        BigEndian::read_i32(&self.bytes[28..]).into()
     }
 
     /// Return the hash of revision's full text.
@@ -335,8 +359,8 @@
                 offset: 0,
                 compressed_len: 0,
                 uncompressed_len: 0,
-                base_revision_or_base_of_delta_chain: 0,
-                link_revision: 0,
+                base_revision_or_base_of_delta_chain: Revision(0),
+                link_revision: Revision(0),
                 p1: NULL_REVISION,
                 p2: NULL_REVISION,
                 node: NULL_NODE,
@@ -426,11 +450,11 @@
             bytes.extend(&(self.compressed_len as u32).to_be_bytes());
             bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
             bytes.extend(
-                &self.base_revision_or_base_of_delta_chain.to_be_bytes(),
+                &self.base_revision_or_base_of_delta_chain.0.to_be_bytes(),
             );
-            bytes.extend(&self.link_revision.to_be_bytes());
-            bytes.extend(&self.p1.to_be_bytes());
-            bytes.extend(&self.p2.to_be_bytes());
+            bytes.extend(&self.link_revision.0.to_be_bytes());
+            bytes.extend(&self.p1.0.to_be_bytes());
+            bytes.extend(&self.p2.0.to_be_bytes());
             bytes.extend(self.node.as_bytes());
             bytes.extend(vec![0u8; 12]);
             bytes
@@ -540,50 +564,52 @@
     #[test]
     fn test_base_revision_or_base_of_delta_chain() {
         let bytes = IndexEntryBuilder::new()
-            .with_base_revision_or_base_of_delta_chain(1)
+            .with_base_revision_or_base_of_delta_chain(Revision(1))
             .build();
         let entry = IndexEntry {
             bytes: &bytes,
             offset_override: None,
         };
 
-        assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1)
+        assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
     }
 
     #[test]
     fn link_revision_test() {
-        let bytes = IndexEntryBuilder::new().with_link_revision(123).build();
+        let bytes = IndexEntryBuilder::new()
+            .with_link_revision(Revision(123))
+            .build();
 
         let entry = IndexEntry {
             bytes: &bytes,
             offset_override: None,
         };
 
-        assert_eq!(entry.link_revision(), 123);
+        assert_eq!(entry.link_revision(), 123.into());
     }
 
     #[test]
     fn p1_test() {
-        let bytes = IndexEntryBuilder::new().with_p1(123).build();
+        let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
 
         let entry = IndexEntry {
             bytes: &bytes,
             offset_override: None,
         };
 
-        assert_eq!(entry.p1(), 123);
+        assert_eq!(entry.p1(), 123.into());
     }
 
     #[test]
     fn p2_test() {
-        let bytes = IndexEntryBuilder::new().with_p2(123).build();
+        let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
 
         let entry = IndexEntry {
             bytes: &bytes,
             offset_override: None,
         };
 
-        assert_eq!(entry.p2(), 123);
+        assert_eq!(entry.p2(), 123.into());
     }
 
     #[test]
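Several types gain an `impl Graph` in this changeset (`Index` here; `Changelog`, `Filelog`, `Manifestlog` and `Revlog` elsewhere in the diff). A self-contained sketch, with toy stand-ins for `Revision` and `GraphError`, of the kind of generic ancestor walk those impls enable:

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
struct Revision(i32);
const NULL_REVISION: Revision = Revision(-1);

#[derive(Debug)]
enum GraphError {
    ParentOutOfRange(Revision),
}

// Same shape as the trait the diff implements for `Index` and the revlogs.
trait Graph {
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError>;
}

// Toy index: position = revision number, value = [p1, p2].
struct ToyIndex(Vec<[Revision; 2]>);

impl Graph for ToyIndex {
    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
        self.0
            .get(rev.0 as usize)
            .copied()
            .ok_or(GraphError::ParentOutOfRange(rev))
    }
}

// Generic over any `Graph`: collect `rev` and all of its ancestors.
fn ancestors(
    g: &impl Graph,
    rev: Revision,
) -> Result<Vec<Revision>, GraphError> {
    let mut seen = HashSet::new();
    let mut stack = vec![rev];
    let mut out = vec![];
    while let Some(r) = stack.pop() {
        if r == NULL_REVISION || !seen.insert(r) {
            continue;
        }
        out.push(r);
        let [p1, p2] = g.parents(r)?;
        stack.push(p1);
        stack.push(p2);
    }
    Ok(out)
}

fn main() {
    // Revisions 0 and 1 are roots; revision 2 merges them.
    let idx = ToyIndex(vec![
        [NULL_REVISION, NULL_REVISION],
        [NULL_REVISION, NULL_REVISION],
        [Revision(0), Revision(1)],
    ]);
    let mut revs = ancestors(&idx, Revision(2)).unwrap();
    revs.sort();
    assert_eq!(revs, [Revision(0), Revision(1), Revision(2)]);
}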
--- a/rust/hg-core/src/revlog/manifest.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/manifest.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -1,10 +1,10 @@
 use crate::errors::HgError;
-use crate::revlog::Revision;
 use crate::revlog::{Node, NodePrefix};
 use crate::revlog::{Revlog, RevlogError};
 use crate::utils::hg_path::HgPath;
 use crate::utils::SliceExt;
 use crate::vfs::Vfs;
+use crate::{Graph, GraphError, Revision, UncheckedRevision};
 
 /// A specialized `Revlog` to work with `manifest` data format.
 pub struct Manifestlog {
@@ -12,6 +12,12 @@
     revlog: Revlog,
 }
 
+impl Graph for Manifestlog {
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+        self.revlog.parents(rev)
+    }
+}
+
 impl Manifestlog {
     /// Open the `manifest` of a repository given by its root.
     pub fn open(store_vfs: &Vfs, use_nodemap: bool) -> Result<Self, HgError> {
@@ -32,7 +38,7 @@
         node: NodePrefix,
     ) -> Result<Manifest, RevlogError> {
         let rev = self.revlog.rev_from_node(node)?;
-        self.data_for_rev(rev)
+        self.data_for_checked_rev(rev)
     }
 
     /// Return the `Manifest` of a given revision number.
@@ -43,9 +49,18 @@
     /// See also `Repo::manifest_for_rev`
     pub fn data_for_rev(
         &self,
+        rev: UncheckedRevision,
+    ) -> Result<Manifest, RevlogError> {
+        let bytes = self.revlog.get_rev_data(rev)?.into_owned();
+        Ok(Manifest { bytes })
+    }
+
+    pub fn data_for_checked_rev(
+        &self,
         rev: Revision,
     ) -> Result<Manifest, RevlogError> {
-        let bytes = self.revlog.get_rev_data(rev)?.into_owned();
+        let bytes =
+            self.revlog.get_rev_data_for_checked_rev(rev)?.into_owned();
         Ok(Manifest { bytes })
     }
 }
--- a/rust/hg-core/src/revlog/mod.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/mod.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -33,24 +33,88 @@
 use crate::errors::HgError;
 use crate::vfs::Vfs;
 
-/// Mercurial revision numbers
-///
 /// As noted in revlog.c, revision numbers are actually encoded in
 /// 4 bytes, and are liberally converted to ints, whence the i32
-pub type Revision = i32;
+pub type BaseRevision = i32;
+
+/// Mercurial revision numbers
+/// In contrast to the more general [`UncheckedRevision`], these are "checked"
+/// in the sense that they should only be used for revisions that are
+/// valid for a given index (i.e. in bounds).
+#[derive(
+    Debug,
+    derive_more::Display,
+    Clone,
+    Copy,
+    Hash,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+)]
+pub struct Revision(pub BaseRevision);
+
+impl format_bytes::DisplayBytes for Revision {
+    fn display_bytes(
+        &self,
+        output: &mut dyn std::io::Write,
+    ) -> std::io::Result<()> {
+        self.0.display_bytes(output)
+    }
+}
+
+/// Unchecked Mercurial revision numbers.
+///
+/// Values of this type have no guarantee of being a valid revision number
+/// in any context. Use the `check_revision` method to get a valid revision
+/// within the appropriate index object.
+#[derive(
+    Debug,
+    derive_more::Display,
+    Clone,
+    Copy,
+    Hash,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+)]
+pub struct UncheckedRevision(pub BaseRevision);
+
+impl format_bytes::DisplayBytes for UncheckedRevision {
+    fn display_bytes(
+        &self,
+        output: &mut dyn std::io::Write,
+    ) -> std::io::Result<()> {
+        self.0.display_bytes(output)
+    }
+}
+
+impl From<Revision> for UncheckedRevision {
+    fn from(value: Revision) -> Self {
+        Self(value.0)
+    }
+}
+
+impl From<BaseRevision> for UncheckedRevision {
+    fn from(value: BaseRevision) -> Self {
+        Self(value)
+    }
+}
 
 /// Marker expressing the absence of a parent
 ///
 /// Independently of the actual representation, `NULL_REVISION` is guaranteed
 /// to be smaller than all existing revisions.
-pub const NULL_REVISION: Revision = -1;
+pub const NULL_REVISION: Revision = Revision(-1);
 
 /// Same as `mercurial.node.wdirrev`
 ///
 /// This is also equal to `i32::max_value()`, but it's better to spell
 /// it out explicitly, same as in `mercurial.node`
 #[allow(clippy::unreadable_literal)]
-pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
+pub const WORKING_DIRECTORY_REVISION: UncheckedRevision =
+    UncheckedRevision(0x7fffffff);
 
 pub const WORKING_DIRECTORY_HEX: &str =
     "ffffffffffffffffffffffffffffffffffffffff";
@@ -66,7 +130,6 @@
 #[derive(Clone, Debug, PartialEq)]
 pub enum GraphError {
     ParentOutOfRange(Revision),
-    WorkingDirectoryUnsupported,
 }
 
 /// The Mercurial Revlog Index
@@ -81,10 +144,21 @@
         self.len() == 0
     }
 
-    /// Return a reference to the Node or `None` if rev is out of bounds
-    ///
-    /// `NULL_REVISION` is not considered to be out of bounds.
+    /// Return a reference to the Node or `None` for `NULL_REVISION`
     fn node(&self, rev: Revision) -> Option<&Node>;
+
+    /// Return a [`Revision`] if `rev` is a valid revision number for this
+    /// index
+    fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
+        let rev = rev.0;
+
+        if rev == NULL_REVISION.0 || (rev >= 0 && (rev as usize) < self.len())
+        {
+            Some(Revision(rev))
+        } else {
+            None
+        }
+    }
 }
 
 const REVISION_FLAG_CENSORED: u16 = 1 << 15;
@@ -101,7 +175,7 @@
 
 const NULL_REVLOG_ENTRY_FLAGS: u16 = 0;
 
-#[derive(Debug, derive_more::From)]
+#[derive(Debug, derive_more::From, derive_more::Display)]
 pub enum RevlogError {
     InvalidRevision,
     /// Working directory is not supported
@@ -145,6 +219,12 @@
     nodemap: Option<nodemap::NodeTree>,
 }
 
+impl Graph for Revlog {
+    fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+        self.index.parents(rev)
+    }
+}
+
 impl Revlog {
     /// Open a revlog index file.
     ///
@@ -168,8 +248,8 @@
     ) -> Result<Self, HgError> {
         let index_path = index_path.as_ref();
         let index = {
-            match store_vfs.mmap_open_opt(&index_path)? {
-                None => Index::new(Box::new(vec![])),
+            match store_vfs.mmap_open_opt(index_path)? {
+                None => Index::new(Box::<Vec<_>>::default()),
                 Some(index_mmap) => {
                     let index = Index::new(Box::new(index_mmap))?;
                     Ok(index)
@@ -224,10 +304,11 @@
 
     /// Returns the node ID for the given revision number, if it exists in this
     /// revlog
-    pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
-        if rev == NULL_REVISION {
+    pub fn node_from_rev(&self, rev: UncheckedRevision) -> Option<&Node> {
+        if rev == NULL_REVISION.into() {
             return Some(&NULL_NODE);
         }
+        let rev = self.index.check_revision(rev)?;
         Some(self.index.get_entry(rev)?.hash())
     }
 
@@ -259,8 +340,9 @@
         // TODO: consider building a non-persistent nodemap in memory to
         // optimize these cases.
         let mut found_by_prefix = None;
-        for rev in (-1..self.len() as Revision).rev() {
-            let candidate_node = if rev == -1 {
+        for rev in (-1..self.len() as BaseRevision).rev() {
+            let rev = Revision(rev as BaseRevision);
+            let candidate_node = if rev == Revision(-1) {
                 NULL_NODE
             } else {
                 let index_entry =
@@ -285,8 +367,8 @@
     }
 
     /// Returns whether the given revision exists in this revlog.
-    pub fn has_rev(&self, rev: Revision) -> bool {
-        self.index.get_entry(rev).is_some()
+    pub fn has_rev(&self, rev: UncheckedRevision) -> bool {
+        self.index.check_revision(rev).is_some()
     }
 
     /// Return the full data associated to a revision.
@@ -296,12 +378,23 @@
     /// snapshot to rebuild the final data.
     pub fn get_rev_data(
         &self,
+        rev: UncheckedRevision,
+    ) -> Result<Cow<[u8]>, RevlogError> {
+        if rev == NULL_REVISION.into() {
+            return Ok(Cow::Borrowed(&[]));
+        };
+        self.get_entry(rev)?.data()
+    }
+
+    /// [`Self::get_rev_data`] for checked revisions.
+    pub fn get_rev_data_for_checked_rev(
+        &self,
         rev: Revision,
     ) -> Result<Cow<[u8]>, RevlogError> {
         if rev == NULL_REVISION {
             return Ok(Cow::Borrowed(&[]));
         };
-        Ok(self.get_entry(rev)?.data()?)
+        self.get_entry_for_checked_rev(rev)?.data()
     }
 
     /// Check the hash of some given data against the recorded hash.
@@ -369,8 +462,7 @@
         }
     }
 
-    /// Get an entry of the revlog.
-    pub fn get_entry(
+    fn get_entry_for_checked_rev(
         &self,
         rev: Revision,
     ) -> Result<RevlogEntry, RevlogError> {
@@ -388,36 +480,60 @@
         } else {
             &self.data()[start..end]
         };
+        let base_rev = self
+            .index
+            .check_revision(index_entry.base_revision_or_base_of_delta_chain())
+            .ok_or_else(|| {
+                RevlogError::corrupted(format!(
+                    "base revision for rev {} is invalid",
+                    rev
+                ))
+            })?;
+        let p1 =
+            self.index.check_revision(index_entry.p1()).ok_or_else(|| {
+                RevlogError::corrupted(format!(
+                    "p1 for rev {} is invalid",
+                    rev
+                ))
+            })?;
+        let p2 =
+            self.index.check_revision(index_entry.p2()).ok_or_else(|| {
+                RevlogError::corrupted(format!(
+                    "p2 for rev {} is invalid",
+                    rev
+                ))
+            })?;
         let entry = RevlogEntry {
             revlog: self,
             rev,
             bytes: data,
             compressed_len: index_entry.compressed_len(),
             uncompressed_len: index_entry.uncompressed_len(),
-            base_rev_or_base_of_delta_chain: if index_entry
-                .base_revision_or_base_of_delta_chain()
-                == rev
-            {
+            base_rev_or_base_of_delta_chain: if base_rev == rev {
                 None
             } else {
-                Some(index_entry.base_revision_or_base_of_delta_chain())
+                Some(base_rev)
             },
-            p1: index_entry.p1(),
-            p2: index_entry.p2(),
+            p1,
+            p2,
             flags: index_entry.flags(),
             hash: *index_entry.hash(),
         };
         Ok(entry)
     }
 
-    /// when resolving internal references within revlog, any errors
-    /// should be reported as corruption, instead of e.g. "invalid revision"
-    fn get_entry_internal(
+    /// Get an entry of the revlog.
+    pub fn get_entry(
         &self,
-        rev: Revision,
-    ) -> Result<RevlogEntry, HgError> {
-        self.get_entry(rev)
-            .map_err(|_| corrupted(format!("revision {} out of range", rev)))
+        rev: UncheckedRevision,
+    ) -> Result<RevlogEntry, RevlogError> {
+        if rev == NULL_REVISION.into() {
+            return Ok(self.make_null_entry());
+        }
+        let rev = self.index.check_revision(rev).ok_or_else(|| {
+            RevlogError::corrupted(format!("rev {} is invalid", rev))
+        })?;
+        self.get_entry_for_checked_rev(rev)
     }
 }
 
@@ -475,7 +591,7 @@
         if self.p1 == NULL_REVISION {
             Ok(None)
         } else {
-            Ok(Some(self.revlog.get_entry(self.p1)?))
+            Ok(Some(self.revlog.get_entry_for_checked_rev(self.p1)?))
         }
     }
 
@@ -485,7 +601,7 @@
         if self.p2 == NULL_REVISION {
             Ok(None)
         } else {
-            Ok(Some(self.revlog.get_entry(self.p2)?))
+            Ok(Some(self.revlog.get_entry_for_checked_rev(self.p2)?))
         }
     }
 
@@ -516,7 +632,7 @@
     }
 
     /// The data for this entry, after resolving deltas if any.
-    pub fn rawdata(&self) -> Result<Cow<'revlog, [u8]>, HgError> {
+    pub fn rawdata(&self) -> Result<Cow<'revlog, [u8]>, RevlogError> {
         let mut entry = self.clone();
         let mut delta_chain = vec![];
 
@@ -526,13 +642,14 @@
         // [_chaininfo] and in [index_deltachain].
         let uses_generaldelta = self.revlog.index.uses_generaldelta();
         while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
-            let base_rev = if uses_generaldelta {
-                base_rev
+            entry = if uses_generaldelta {
+                delta_chain.push(entry);
+                self.revlog.get_entry_for_checked_rev(base_rev)?
             } else {
-                entry.rev - 1
+                let base_rev = UncheckedRevision(entry.rev.0 - 1);
+                delta_chain.push(entry);
+                self.revlog.get_entry(base_rev)?
             };
-            delta_chain.push(entry);
-            entry = self.revlog.get_entry_internal(base_rev)?;
         }
 
         let data = if delta_chain.is_empty() {
@@ -547,7 +664,7 @@
     fn check_data(
         &self,
         data: Cow<'revlog, [u8]>,
-    ) -> Result<Cow<'revlog, [u8]>, HgError> {
+    ) -> Result<Cow<'revlog, [u8]>, RevlogError> {
         if self.revlog.check_hash(
             self.p1,
             self.p2,
@@ -559,22 +676,24 @@
             if (self.flags & REVISION_FLAG_ELLIPSIS) != 0 {
                 return Err(HgError::unsupported(
                     "ellipsis revisions are not supported by rhg",
-                ));
+                )
+                .into());
             }
             Err(corrupted(format!(
                 "hash check failed for revision {}",
                 self.rev
-            )))
+            ))
+            .into())
         }
     }
 
-    pub fn data(&self) -> Result<Cow<'revlog, [u8]>, HgError> {
+    pub fn data(&self) -> Result<Cow<'revlog, [u8]>, RevlogError> {
         let data = self.rawdata()?;
         if self.rev == NULL_REVISION {
             return Ok(data);
         }
         if self.is_censored() {
-            return Err(HgError::CensoredNodeError);
+            return Err(HgError::CensoredNodeError.into());
         }
         self.check_data(data)
     }
@@ -693,13 +812,13 @@
         let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
         assert!(revlog.is_empty());
         assert_eq!(revlog.len(), 0);
-        assert!(revlog.get_entry(0).is_err());
-        assert!(!revlog.has_rev(0));
+        assert!(revlog.get_entry(0.into()).is_err());
+        assert!(!revlog.has_rev(0.into()));
         assert_eq!(
             revlog.rev_from_node(NULL_NODE.into()).unwrap(),
             NULL_REVISION
         );
-        let null_entry = revlog.get_entry(NULL_REVISION).ok().unwrap();
+        let null_entry = revlog.get_entry(NULL_REVISION.into()).ok().unwrap();
         assert_eq!(null_entry.revision(), NULL_REVISION);
         assert!(null_entry.data().unwrap().is_empty());
     }
@@ -727,8 +846,8 @@
             .build();
         let entry2_bytes = IndexEntryBuilder::new()
             .with_offset(INDEX_ENTRY_SIZE)
-            .with_p1(0)
-            .with_p2(1)
+            .with_p1(Revision(0))
+            .with_p2(Revision(1))
             .with_node(node2)
             .build();
         let contents = vec![entry0_bytes, entry1_bytes, entry2_bytes]
@@ -738,8 +857,8 @@
         std::fs::write(temp.path().join("foo.i"), contents).unwrap();
         let revlog = Revlog::open(&vfs, "foo.i", None, false).unwrap();
 
-        let entry0 = revlog.get_entry(0).ok().unwrap();
-        assert_eq!(entry0.revision(), 0);
+        let entry0 = revlog.get_entry(0.into()).ok().unwrap();
+        assert_eq!(entry0.revision(), Revision(0));
         assert_eq!(*entry0.node(), node0);
         assert!(!entry0.has_p1());
         assert_eq!(entry0.p1(), None);
@@ -749,8 +868,8 @@
         let p2_entry = entry0.p2_entry().unwrap();
         assert!(p2_entry.is_none());
 
-        let entry1 = revlog.get_entry(1).ok().unwrap();
-        assert_eq!(entry1.revision(), 1);
+        let entry1 = revlog.get_entry(1.into()).ok().unwrap();
+        assert_eq!(entry1.revision(), Revision(1));
         assert_eq!(*entry1.node(), node1);
         assert!(!entry1.has_p1());
         assert_eq!(entry1.p1(), None);
@@ -760,18 +879,18 @@
         let p2_entry = entry1.p2_entry().unwrap();
         assert!(p2_entry.is_none());
 
-        let entry2 = revlog.get_entry(2).ok().unwrap();
-        assert_eq!(entry2.revision(), 2);
+        let entry2 = revlog.get_entry(2.into()).ok().unwrap();
+        assert_eq!(entry2.revision(), Revision(2));
         assert_eq!(*entry2.node(), node2);
         assert!(entry2.has_p1());
-        assert_eq!(entry2.p1(), Some(0));
-        assert_eq!(entry2.p2(), Some(1));
+        assert_eq!(entry2.p1(), Some(Revision(0)));
+        assert_eq!(entry2.p2(), Some(Revision(1)));
         let p1_entry = entry2.p1_entry().unwrap();
         assert!(p1_entry.is_some());
-        assert_eq!(p1_entry.unwrap().revision(), 0);
+        assert_eq!(p1_entry.unwrap().revision(), Revision(0));
         let p2_entry = entry2.p2_entry().unwrap();
         assert!(p2_entry.is_some());
-        assert_eq!(p2_entry.unwrap().revision(), 1);
+        assert_eq!(p2_entry.unwrap().revision(), Revision(1));
     }
 
     #[test]
@@ -804,29 +923,32 @@
         std::fs::write(temp.path().join("foo.i"), contents).unwrap();
 
         let mut idx = nodemap::tests::TestNtIndex::new();
-        idx.insert_node(0, node0).unwrap();
-        idx.insert_node(1, node1).unwrap();
+        idx.insert_node(Revision(0), node0).unwrap();
+        idx.insert_node(Revision(1), node1).unwrap();
 
         let revlog =
             Revlog::open_gen(&vfs, "foo.i", None, true, Some(idx.nt)).unwrap();
 
         // accessing the data shows the corruption
-        revlog.get_entry(0).unwrap().data().unwrap_err();
+        revlog.get_entry(0.into()).unwrap().data().unwrap_err();
 
-        assert_eq!(revlog.rev_from_node(NULL_NODE.into()).unwrap(), -1);
-        assert_eq!(revlog.rev_from_node(node0.into()).unwrap(), 0);
-        assert_eq!(revlog.rev_from_node(node1.into()).unwrap(), 1);
+        assert_eq!(
+            revlog.rev_from_node(NULL_NODE.into()).unwrap(),
+            Revision(-1)
+        );
+        assert_eq!(revlog.rev_from_node(node0.into()).unwrap(), Revision(0));
+        assert_eq!(revlog.rev_from_node(node1.into()).unwrap(), Revision(1));
         assert_eq!(
             revlog
                 .rev_from_node(NodePrefix::from_hex("000").unwrap())
                 .unwrap(),
-            -1
+            Revision(-1)
         );
         assert_eq!(
             revlog
                 .rev_from_node(NodePrefix::from_hex("b00").unwrap())
                 .unwrap(),
-            1
+            Revision(1)
         );
         // RevlogError does not implement PartialEq
         // (ultimately because io::Error does not)
--- a/rust/hg-core/src/revlog/node.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/node.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -348,7 +348,7 @@
         assert_eq!(Node::from_hex(SAMPLE_NODE_HEX).unwrap(), SAMPLE_NODE);
         assert!(Node::from_hex(not_hex).is_err());
         assert!(Node::from_hex(too_short).is_err());
-        assert!(Node::from_hex(&too_long).is_err());
+        assert!(Node::from_hex(too_long).is_err());
     }
 
     #[test]
--- a/rust/hg-core/src/revlog/nodemap.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/nodemap.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -12,6 +12,8 @@
 //! Following existing implicit conventions, the "nodemap" terminology
 //! is used in a more abstract context.
 
+use crate::UncheckedRevision;
+
 use super::{
     node::NULL_NODE, Node, NodePrefix, Revision, RevlogIndex, NULL_REVISION,
 };
@@ -30,7 +32,7 @@
     /// This can be returned by methods meant for (at most) one match.
     MultipleResults,
     /// A `Revision` stored in the nodemap could not be found in the index
-    RevisionNotInIndex(Revision),
+    RevisionNotInIndex(UncheckedRevision),
 }
 
 /// Mapping system from Mercurial nodes to revision numbers.
@@ -125,7 +127,9 @@
 /// use.
 #[derive(Clone, Debug, Eq, PartialEq)]
 enum Element {
-    Rev(Revision),
+    // This is not a Mercurial revision. It's an `i32` because that is the
+    // right type for this structure.
+    Rev(i32),
     Block(usize),
     None,
 }
@@ -245,17 +249,21 @@
 fn has_prefix_or_none(
     idx: &impl RevlogIndex,
     prefix: NodePrefix,
-    rev: Revision,
+    rev: UncheckedRevision,
 ) -> Result<Option<Revision>, NodeMapError> {
-    idx.node(rev)
-        .ok_or(NodeMapError::RevisionNotInIndex(rev))
-        .map(|node| {
-            if prefix.is_prefix_of(node) {
-                Some(rev)
-            } else {
-                None
-            }
-        })
+    match idx.check_revision(rev) {
+        Some(checked) => idx
+            .node(checked)
+            .ok_or(NodeMapError::RevisionNotInIndex(rev))
+            .map(|node| {
+                if prefix.is_prefix_of(node) {
+                    Some(checked)
+                } else {
+                    None
+                }
+            }),
+        None => Err(NodeMapError::RevisionNotInIndex(rev)),
+    }
 }
 
 /// validate that the candidate's node indeed starts with the given prefix,
@@ -266,7 +274,7 @@
 fn validate_candidate(
     idx: &impl RevlogIndex,
     prefix: NodePrefix,
-    candidate: (Option<Revision>, usize),
+    candidate: (Option<UncheckedRevision>, usize),
 ) -> Result<(Option<Revision>, usize), NodeMapError> {
     let (rev, steps) = candidate;
     if let Some(nz_nybble) = prefix.first_different_nybble(&NULL_NODE) {
@@ -384,6 +392,8 @@
     /// be inferred from
     /// the `NodeTree` data is that `rev` is the revision with the longest
     /// common node prefix with the given prefix.
+    /// We return an [`UncheckedRevision`] because we have no guarantee that
+    /// the revision we found is valid for the index.
     ///
     /// The second returned value is the size of the smallest subprefix
     /// of `prefix` that would give the same result, i.e. not the
@@ -392,7 +402,7 @@
     fn lookup(
         &self,
         prefix: NodePrefix,
-    ) -> Result<(Option<Revision>, usize), NodeMapError> {
+    ) -> Result<(Option<UncheckedRevision>, usize), NodeMapError> {
         for (i, visit_item) in self.visit(prefix).enumerate() {
             if let Some(opt) = visit_item.final_revision() {
                 return Ok((opt, i + 1));
@@ -465,8 +475,11 @@
 
         if let Element::Rev(old_rev) = deepest.element {
             let old_node = index
-                .node(old_rev)
-                .ok_or(NodeMapError::RevisionNotInIndex(old_rev))?;
+                .check_revision(old_rev.into())
+                .and_then(|rev| index.node(rev))
+                .ok_or_else(|| {
+                    NodeMapError::RevisionNotInIndex(old_rev.into())
+                })?;
             if old_node == node {
                 return Ok(()); // avoid creating lots of useless blocks
             }
@@ -490,14 +503,14 @@
                 } else {
                     let mut new_block = Block::new();
                     new_block.set(old_nybble, Element::Rev(old_rev));
-                    new_block.set(new_nybble, Element::Rev(rev));
+                    new_block.set(new_nybble, Element::Rev(rev.0));
                     self.growable.push(new_block);
                     break;
                 }
             }
         } else {
             // Free slot in the deepest block: no splitting has to be done
-            block.set(deepest.nybble, Element::Rev(rev));
+            block.set(deepest.nybble, Element::Rev(rev.0));
         }
 
         // Backtrack over visit steps to update references
@@ -623,13 +636,13 @@
 
 impl NodeTreeVisitItem {
     // Return `Some(opt)` if this item is final, with `opt` being the
-    // `Revision` that it may represent.
+    // `UncheckedRevision` that it may represent.
     //
     // If the item is not terminal, return `None`
-    fn final_revision(&self) -> Option<Option<Revision>> {
+    fn final_revision(&self) -> Option<Option<UncheckedRevision>> {
         match self.element {
             Element::Block(_) => None,
-            Element::Rev(r) => Some(Some(r)),
+            Element::Rev(r) => Some(Some(r.into())),
             Element::None => Some(None),
         }
     }
@@ -643,7 +656,7 @@
 
 impl fmt::Debug for NodeTree {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let readonly: &[Block] = &*self.readonly;
+        let readonly: &[Block] = &self.readonly;
         write!(
             f,
             "readonly: {:?}, growable: {:?}, root: {:?}",
@@ -655,7 +668,7 @@
 impl Default for NodeTree {
     /// Create a fully mutable empty NodeTree
     fn default() -> Self {
-        NodeTree::new(Box::new(Vec::new()))
+        NodeTree::new(Box::<Vec<_>>::default())
     }
 }
 
@@ -697,6 +710,13 @@
         )
     }
 
+    /// Shorthand to reduce boilerplate when creating [`Revision`] for testing
+    macro_rules! R {
+        ($revision:literal) => {
+            Revision($revision)
+        };
+    }
+
     #[test]
     fn test_block_debug() {
         let mut block = Block::new();
@@ -733,16 +753,20 @@
         assert_eq!(block.get(4), Element::Rev(1));
     }
 
-    type TestIndex = HashMap<Revision, Node>;
+    type TestIndex = HashMap<UncheckedRevision, Node>;
 
     impl RevlogIndex for TestIndex {
         fn node(&self, rev: Revision) -> Option<&Node> {
-            self.get(&rev)
+            self.get(&rev.into())
         }
 
         fn len(&self) -> usize {
             self.len()
         }
+
+        fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
+            self.get(&rev).map(|_| Revision(rev.0))
+        }
     }
 
     /// Pad hexadecimal Node prefix with zeros on the right
@@ -751,12 +775,12 @@
     /// strings for test data, and brings actual hash size independency.
     #[cfg(test)]
     fn pad_node(hex: &str) -> Node {
-        Node::from_hex(&hex_pad_right(hex)).unwrap()
+        Node::from_hex(hex_pad_right(hex)).unwrap()
     }
 
     /// Pad hexadecimal Node prefix with zeros on the right, then insert
     fn pad_insert(idx: &mut TestIndex, rev: Revision, hex: &str) {
-        idx.insert(rev, pad_node(hex));
+        idx.insert(rev.into(), pad_node(hex));
     }
 
     fn sample_nodetree() -> NodeTree {
@@ -786,18 +810,21 @@
     #[test]
     fn test_immutable_find_simplest() -> Result<(), NodeMapError> {
         let mut idx: TestIndex = HashMap::new();
-        pad_insert(&mut idx, 1, "1234deadcafe");
+        pad_insert(&mut idx, R!(1), "1234deadcafe");
 
         let nt = NodeTree::from(vec![block! {1: Rev(1)}]);
-        assert_eq!(nt.find_bin(&idx, hex("1"))?, Some(1));
-        assert_eq!(nt.find_bin(&idx, hex("12"))?, Some(1));
-        assert_eq!(nt.find_bin(&idx, hex("1234de"))?, Some(1));
+        assert_eq!(nt.find_bin(&idx, hex("1"))?, Some(R!(1)));
+        assert_eq!(nt.find_bin(&idx, hex("12"))?, Some(R!(1)));
+        assert_eq!(nt.find_bin(&idx, hex("1234de"))?, Some(R!(1)));
         assert_eq!(nt.find_bin(&idx, hex("1a"))?, None);
         assert_eq!(nt.find_bin(&idx, hex("ab"))?, None);
 
         // and with full binary Nodes
-        assert_eq!(nt.find_node(&idx, idx.get(&1).unwrap())?, Some(1));
-        let unknown = Node::from_hex(&hex_pad_right("3d")).unwrap();
+        assert_eq!(
+            nt.find_node(&idx, idx.get(&1.into()).unwrap())?,
+            Some(R!(1))
+        );
+        let unknown = Node::from_hex(hex_pad_right("3d")).unwrap();
         assert_eq!(nt.find_node(&idx, &unknown)?, None);
         Ok(())
     }
@@ -805,15 +832,15 @@
     #[test]
     fn test_immutable_find_one_jump() {
         let mut idx = TestIndex::new();
-        pad_insert(&mut idx, 9, "012");
-        pad_insert(&mut idx, 0, "00a");
+        pad_insert(&mut idx, R!(9), "012");
+        pad_insert(&mut idx, R!(0), "00a");
 
         let nt = sample_nodetree();
 
         assert_eq!(nt.find_bin(&idx, hex("0")), Err(MultipleResults));
-        assert_eq!(nt.find_bin(&idx, hex("01")), Ok(Some(9)));
+        assert_eq!(nt.find_bin(&idx, hex("01")), Ok(Some(R!(9))));
         assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
-        assert_eq!(nt.find_bin(&idx, hex("00a")), Ok(Some(0)));
+        assert_eq!(nt.find_bin(&idx, hex("00a")), Ok(Some(R!(0))));
         assert_eq!(nt.unique_prefix_len_bin(&idx, hex("00a")), Ok(Some(3)));
         assert_eq!(nt.find_bin(&idx, hex("000")), Ok(Some(NULL_REVISION)));
     }
@@ -821,11 +848,11 @@
     #[test]
     fn test_mutated_find() -> Result<(), NodeMapError> {
         let mut idx = TestIndex::new();
-        pad_insert(&mut idx, 9, "012");
-        pad_insert(&mut idx, 0, "00a");
-        pad_insert(&mut idx, 2, "cafe");
-        pad_insert(&mut idx, 3, "15");
-        pad_insert(&mut idx, 1, "10");
+        pad_insert(&mut idx, R!(9), "012");
+        pad_insert(&mut idx, R!(0), "00a");
+        pad_insert(&mut idx, R!(2), "cafe");
+        pad_insert(&mut idx, R!(3), "15");
+        pad_insert(&mut idx, R!(1), "10");
 
         let nt = NodeTree {
             readonly: sample_nodetree().readonly,
@@ -833,13 +860,13 @@
             root: block![0: Block(1), 1:Block(3), 12: Rev(2)],
             masked_inner_blocks: 1,
         };
-        assert_eq!(nt.find_bin(&idx, hex("10"))?, Some(1));
-        assert_eq!(nt.find_bin(&idx, hex("c"))?, Some(2));
+        assert_eq!(nt.find_bin(&idx, hex("10"))?, Some(R!(1)));
+        assert_eq!(nt.find_bin(&idx, hex("c"))?, Some(R!(2)));
         assert_eq!(nt.unique_prefix_len_bin(&idx, hex("c"))?, Some(1));
         assert_eq!(nt.find_bin(&idx, hex("00")), Err(MultipleResults));
         assert_eq!(nt.find_bin(&idx, hex("000"))?, Some(NULL_REVISION));
         assert_eq!(nt.unique_prefix_len_bin(&idx, hex("000"))?, Some(3));
-        assert_eq!(nt.find_bin(&idx, hex("01"))?, Some(9));
+        assert_eq!(nt.find_bin(&idx, hex("01"))?, Some(R!(9)));
         assert_eq!(nt.masked_readonly_blocks(), 2);
         Ok(())
     }
@@ -862,7 +889,7 @@
             rev: Revision,
             node: Node,
         ) -> Result<(), NodeMapError> {
-            self.index.insert(rev, node);
+            self.index.insert(rev.into(), node);
             self.nt.insert(&self.index, &node, rev)?;
             Ok(())
         }
@@ -872,7 +899,8 @@
             rev: Revision,
             hex: &str,
         ) -> Result<(), NodeMapError> {
-            return self.insert_node(rev, pad_node(hex));
+            let node = pad_node(hex);
+            self.insert_node(rev, node)
         }
 
         fn find_hex(
@@ -903,38 +931,44 @@
         }
     }
 
+    impl Default for TestNtIndex {
+        fn default() -> Self {
+            Self::new()
+        }
+    }
+
     #[test]
     fn test_insert_full_mutable() -> Result<(), NodeMapError> {
         let mut idx = TestNtIndex::new();
-        idx.insert(0, "1234")?;
-        assert_eq!(idx.find_hex("1")?, Some(0));
-        assert_eq!(idx.find_hex("12")?, Some(0));
+        idx.insert(Revision(0), "1234")?;
+        assert_eq!(idx.find_hex("1")?, Some(R!(0)));
+        assert_eq!(idx.find_hex("12")?, Some(R!(0)));
 
         // let's trigger a simple split
-        idx.insert(1, "1a34")?;
+        idx.insert(Revision(1), "1a34")?;
         assert_eq!(idx.nt.growable.len(), 1);
-        assert_eq!(idx.find_hex("12")?, Some(0));
-        assert_eq!(idx.find_hex("1a")?, Some(1));
+        assert_eq!(idx.find_hex("12")?, Some(R!(0)));
+        assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
 
         // reinserting is a no-op
-        idx.insert(1, "1a34")?;
+        idx.insert(Revision(1), "1a34")?;
         assert_eq!(idx.nt.growable.len(), 1);
-        assert_eq!(idx.find_hex("12")?, Some(0));
-        assert_eq!(idx.find_hex("1a")?, Some(1));
+        assert_eq!(idx.find_hex("12")?, Some(R!(0)));
+        assert_eq!(idx.find_hex("1a")?, Some(R!(1)));
 
-        idx.insert(2, "1a01")?;
+        idx.insert(Revision(2), "1a01")?;
         assert_eq!(idx.nt.growable.len(), 2);
         assert_eq!(idx.find_hex("1a"), Err(NodeMapError::MultipleResults));
-        assert_eq!(idx.find_hex("12")?, Some(0));
-        assert_eq!(idx.find_hex("1a3")?, Some(1));
-        assert_eq!(idx.find_hex("1a0")?, Some(2));
+        assert_eq!(idx.find_hex("12")?, Some(R!(0)));
+        assert_eq!(idx.find_hex("1a3")?, Some(R!(1)));
+        assert_eq!(idx.find_hex("1a0")?, Some(R!(2)));
         assert_eq!(idx.find_hex("1a12")?, None);
 
         // now let's make it split and create more than one additional block
-        idx.insert(3, "1a345")?;
+        idx.insert(Revision(3), "1a345")?;
         assert_eq!(idx.nt.growable.len(), 4);
-        assert_eq!(idx.find_hex("1a340")?, Some(1));
-        assert_eq!(idx.find_hex("1a345")?, Some(3));
+        assert_eq!(idx.find_hex("1a340")?, Some(R!(1)));
+        assert_eq!(idx.find_hex("1a345")?, Some(R!(3)));
         assert_eq!(idx.find_hex("1a341")?, None);
 
         // there's no readonly block to mask
@@ -945,7 +979,7 @@
     #[test]
     fn test_unique_prefix_len_zero_prefix() {
         let mut idx = TestNtIndex::new();
-        idx.insert(0, "00000abcd").unwrap();
+        idx.insert(Revision(0), "00000abcd").unwrap();
 
         assert_eq!(idx.find_hex("000"), Err(NodeMapError::MultipleResults));
         // in the nodetree proper, this will be found at the first nybble
@@ -955,7 +989,7 @@
         assert_eq!(idx.unique_prefix_len_hex("00000ab"), Ok(Some(6)));
 
         // same with odd result
-        idx.insert(1, "00123").unwrap();
+        idx.insert(Revision(1), "00123").unwrap();
         assert_eq!(idx.unique_prefix_len_hex("001"), Ok(Some(3)));
         assert_eq!(idx.unique_prefix_len_hex("0012"), Ok(Some(3)));
 
@@ -975,49 +1009,49 @@
         let mut node1_hex = hex_pad_right("444444");
         node1_hex.pop();
         node1_hex.push('5');
-        let node0 = Node::from_hex(&node0_hex).unwrap();
+        let node0 = Node::from_hex(node0_hex).unwrap();
         let node1 = Node::from_hex(&node1_hex).unwrap();
 
-        idx.insert(0, node0);
-        nt.insert(idx, &node0, 0)?;
-        idx.insert(1, node1);
-        nt.insert(idx, &node1, 1)?;
+        idx.insert(0.into(), node0);
+        nt.insert(idx, &node0, R!(0))?;
+        idx.insert(1.into(), node1);
+        nt.insert(idx, &node1, R!(1))?;
 
-        assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(0));
-        assert_eq!(nt.find_bin(idx, (&node1).into())?, Some(1));
+        assert_eq!(nt.find_bin(idx, (&node0).into())?, Some(R!(0)));
+        assert_eq!(nt.find_bin(idx, (&node1).into())?, Some(R!(1)));
         Ok(())
     }
 
     #[test]
     fn test_insert_partly_immutable() -> Result<(), NodeMapError> {
         let mut idx = TestNtIndex::new();
-        idx.insert(0, "1234")?;
-        idx.insert(1, "1235")?;
-        idx.insert(2, "131")?;
-        idx.insert(3, "cafe")?;
+        idx.insert(Revision(0), "1234")?;
+        idx.insert(Revision(1), "1235")?;
+        idx.insert(Revision(2), "131")?;
+        idx.insert(Revision(3), "cafe")?;
         let mut idx = idx.commit();
-        assert_eq!(idx.find_hex("1234")?, Some(0));
-        assert_eq!(idx.find_hex("1235")?, Some(1));
-        assert_eq!(idx.find_hex("131")?, Some(2));
-        assert_eq!(idx.find_hex("cafe")?, Some(3));
+        assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
+        assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
+        assert_eq!(idx.find_hex("131")?, Some(R!(2)));
+        assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
         // we did not add anything since init from readonly
         assert_eq!(idx.nt.masked_readonly_blocks(), 0);
 
-        idx.insert(4, "123A")?;
-        assert_eq!(idx.find_hex("1234")?, Some(0));
-        assert_eq!(idx.find_hex("1235")?, Some(1));
-        assert_eq!(idx.find_hex("131")?, Some(2));
-        assert_eq!(idx.find_hex("cafe")?, Some(3));
-        assert_eq!(idx.find_hex("123A")?, Some(4));
+        idx.insert(Revision(4), "123A")?;
+        assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
+        assert_eq!(idx.find_hex("1235")?, Some(R!(1)));
+        assert_eq!(idx.find_hex("131")?, Some(R!(2)));
+        assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
+        assert_eq!(idx.find_hex("123A")?, Some(R!(4)));
         // we masked blocks for all prefixes of "123", including the root
         assert_eq!(idx.nt.masked_readonly_blocks(), 4);
 
         eprintln!("{:?}", idx.nt);
-        idx.insert(5, "c0")?;
-        assert_eq!(idx.find_hex("cafe")?, Some(3));
-        assert_eq!(idx.find_hex("c0")?, Some(5));
+        idx.insert(Revision(5), "c0")?;
+        assert_eq!(idx.find_hex("cafe")?, Some(R!(3)));
+        assert_eq!(idx.find_hex("c0")?, Some(R!(5)));
         assert_eq!(idx.find_hex("c1")?, None);
-        assert_eq!(idx.find_hex("1234")?, Some(0));
+        assert_eq!(idx.find_hex("1234")?, Some(R!(0)));
         // inserting "c0" is just splitting the 'c' slot of the mutable root,
         // it doesn't mask anything
         assert_eq!(idx.nt.masked_readonly_blocks(), 4);
@@ -1028,10 +1062,10 @@
     #[test]
     fn test_invalidate_all() -> Result<(), NodeMapError> {
         let mut idx = TestNtIndex::new();
-        idx.insert(0, "1234")?;
-        idx.insert(1, "1235")?;
-        idx.insert(2, "131")?;
-        idx.insert(3, "cafe")?;
+        idx.insert(Revision(0), "1234")?;
+        idx.insert(Revision(1), "1235")?;
+        idx.insert(Revision(2), "131")?;
+        idx.insert(Revision(3), "cafe")?;
         let mut idx = idx.commit();
 
         idx.nt.invalidate_all();
@@ -1058,9 +1092,9 @@
     #[test]
     fn test_into_added_bytes() -> Result<(), NodeMapError> {
         let mut idx = TestNtIndex::new();
-        idx.insert(0, "1234")?;
+        idx.insert(Revision(0), "1234")?;
         let mut idx = idx.commit();
-        idx.insert(4, "cafe")?;
+        idx.insert(Revision(4), "cafe")?;
         let (_, bytes) = idx.nt.into_readonly_and_added_bytes();
 
         // only the root block has been changed
--- a/rust/hg-core/src/revlog/nodemap_docket.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revlog/nodemap_docket.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -75,7 +75,7 @@
         // TODO: use `vfs.read()` here when the `persistent-nodemap.mmap`
         // config is false?
         if let Some(mmap) =
-            store_vfs.mmap_open(&data_path).io_not_found_as_none()?
+            store_vfs.mmap_open(data_path).io_not_found_as_none()?
         {
             if mmap.len() >= data_length {
                 Ok(Some((docket, mmap)))
--- a/rust/hg-core/src/revset.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/revset.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -53,9 +53,11 @@
     if let Ok(integer) = input.parse::<i32>() {
         if integer.to_string() == input
             && integer >= 0
-            && revlog.has_rev(integer)
+            && revlog.has_rev(integer.into())
         {
-            return Ok(integer);
+            // This is fine because we've just checked that the revision is
+            // valid for the given revlog.
+            return Ok(Revision(integer));
         }
     }
     if let Ok(prefix) = NodePrefix::from_hex(input) {
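
The revset change above follows a "check, then wrap" discipline: the raw integer is validated against the revlog before being promoted to `Revision`. A minimal sketch of that discipline with stand-in types (`Revlog` and `has_rev` here are simplified assumptions, not the hg-core signatures):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct UncheckedRevision(i32);

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Revision(i32);

struct Revlog {
    len: i32,
}

impl Revlog {
    // Stand-in for the real bounds check against the revlog index.
    fn has_rev(&self, rev: UncheckedRevision) -> bool {
        rev.0 >= 0 && rev.0 < self.len
    }
}

fn resolve(revlog: &Revlog, input: i32) -> Option<Revision> {
    if revlog.has_rev(UncheckedRevision(input)) {
        // Wrap into `Revision` only after the check succeeded.
        Some(Revision(input))
    } else {
        None
    }
}

fn main() {
    let revlog = Revlog { len: 10 };
    assert_eq!(resolve(&revlog, 3), Some(Revision(3)));
    assert_eq!(resolve(&revlog, 12), None);
}
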
--- a/rust/hg-core/src/sparse.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/sparse.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -282,7 +282,8 @@
                 let (patterns, subwarnings) = parse_pattern_file_contents(
                     &config.includes,
                     Path::new(""),
-                    Some(b"glob:".as_ref()),
+                    Some(PatternSyntax::Glob),
+                    false,
                     false,
                 )?;
                 warnings.extend(subwarnings.into_iter().map(From::from));
@@ -292,7 +293,8 @@
                 let (patterns, subwarnings) = parse_pattern_file_contents(
                     &config.excludes,
                     Path::new(""),
-                    Some(b"glob:".as_ref()),
+                    Some(PatternSyntax::Glob),
+                    false,
                     false,
                 )?;
                 warnings.extend(subwarnings.into_iter().map(From::from));
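
The sparse change swaps a raw `b"glob:"` byte prefix for the typed `PatternSyntax::Glob` variant. A rough sketch of why the enum is preferable, using an assumed, simplified enum (the real one lives in hg-core's filepatterns module):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PatternSyntax {
    Glob,
    RelGlob,
    Regexp,
}

// The old convention: callers passed a byte prefix that had to be parsed.
fn syntax_from_bytes(b: &[u8]) -> Option<PatternSyntax> {
    match b {
        b"glob:" => Some(PatternSyntax::Glob),
        b"relglob:" => Some(PatternSyntax::RelGlob),
        b"re:" => Some(PatternSyntax::Regexp),
        _ => None,
    }
}

fn main() {
    assert_eq!(syntax_from_bytes(b"glob:"), Some(PatternSyntax::Glob));
    // With the typed variant, callers hand over `PatternSyntax::Glob`
    // directly and the parsing step (and its failure mode) disappears.
}
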
--- a/rust/hg-core/src/testing.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/testing.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -41,22 +41,27 @@
 
 impl Graph for SampleGraph {
     fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
-        match rev {
-            0 => Ok([NULL_REVISION, NULL_REVISION]),
-            1 => Ok([0, NULL_REVISION]),
-            2 => Ok([1, NULL_REVISION]),
-            3 => Ok([1, NULL_REVISION]),
-            4 => Ok([2, NULL_REVISION]),
-            5 => Ok([4, NULL_REVISION]),
-            6 => Ok([4, NULL_REVISION]),
-            7 => Ok([4, NULL_REVISION]),
-            8 => Ok([NULL_REVISION, NULL_REVISION]),
+        let null_rev = NULL_REVISION.0;
+        let res = match rev.0 {
+            0 => Ok([null_rev, null_rev]),
+            1 => Ok([0, null_rev]),
+            2 => Ok([1, null_rev]),
+            3 => Ok([1, null_rev]),
+            4 => Ok([2, null_rev]),
+            5 => Ok([4, null_rev]),
+            6 => Ok([4, null_rev]),
+            7 => Ok([4, null_rev]),
+            8 => Ok([null_rev, null_rev]),
             9 => Ok([6, 7]),
-            10 => Ok([5, NULL_REVISION]),
+            10 => Ok([5, null_rev]),
             11 => Ok([3, 7]),
-            12 => Ok([9, NULL_REVISION]),
-            13 => Ok([8, NULL_REVISION]),
-            r => Err(GraphError::ParentOutOfRange(r)),
+            12 => Ok([9, null_rev]),
+            13 => Ok([8, null_rev]),
+            r => Err(GraphError::ParentOutOfRange(Revision(r))),
+        };
+        match res {
+            Ok([a, b]) => Ok([Revision(a), Revision(b)]),
+            Err(e) => Err(e),
         }
     }
 }
@@ -67,6 +72,6 @@
 
 impl Graph for VecGraph {
     fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
-        Ok(self[rev as usize])
+        Ok(self[rev.0 as usize])
     }
 }
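
`VecGraph` resolves parents positionally through the newtype's inner value. A self-contained sketch of the same idea with stand-in types:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Revision(i32);

const NULL: Revision = Revision(-1);

type VecGraph = Vec<[Revision; 2]>;

// Parents are stored positionally; lookup goes through the inner value.
fn parents(g: &VecGraph, rev: Revision) -> [Revision; 2] {
    g[rev.0 as usize]
}

fn main() {
    let g: VecGraph = vec![[NULL, NULL], [Revision(0), NULL]];
    assert_eq!(parents(&g, Revision(1)), [Revision(0), NULL]);
}
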
--- a/rust/hg-core/src/utils/files.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/utils/files.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -192,13 +192,13 @@
     let name = name.as_ref();
 
     let name = if !name.is_absolute() {
-        root.join(&cwd).join(&name)
+        root.join(cwd).join(name)
     } else {
         name.to_owned()
     };
-    let auditor = PathAuditor::new(&root);
-    if name != root && name.starts_with(&root) {
-        let name = name.strip_prefix(&root).unwrap();
+    let auditor = PathAuditor::new(root);
+    if name != root && name.starts_with(root) {
+        let name = name.strip_prefix(root).unwrap();
         auditor.audit_path(path_to_hg_path_buf(name)?)?;
         Ok(name.to_owned())
     } else if name == root {
@@ -210,7 +210,7 @@
         let mut name = name.deref();
         let original_name = name.to_owned();
         loop {
-            let same = is_same_file(&name, &root).unwrap_or(false);
+            let same = is_same_file(name, root).unwrap_or(false);
             if same {
                 if name == original_name {
                     // `name` was actually the same as root (maybe a symlink)
@@ -218,8 +218,8 @@
                 }
                 // `name` is a symlink to root, so `original_name` is under
                 // root
-                let rel_path = original_name.strip_prefix(&name).unwrap();
-                auditor.audit_path(path_to_hg_path_buf(&rel_path)?)?;
+                let rel_path = original_name.strip_prefix(name).unwrap();
+                auditor.audit_path(path_to_hg_path_buf(rel_path)?)?;
                 return Ok(rel_path.to_owned());
             }
             name = match name.parent() {
@@ -429,7 +429,7 @@
             })
         );
         assert_eq!(
-            canonical_path(&root, Path::new(""), &under_repo_symlink),
+            canonical_path(&root, Path::new(""), under_repo_symlink),
             Ok(PathBuf::from("d"))
         );
     }
--- a/rust/hg-core/src/utils/path_auditor.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/src/utils/path_auditor.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -117,7 +117,7 @@
             if self.audited_dirs.read().unwrap().contains(prefix) {
                 continue;
             }
-            self.check_filesystem(&prefix, &path)?;
+            self.check_filesystem(prefix, path)?;
             self.audited_dirs.write().unwrap().insert(prefix.to_owned());
         }
 
@@ -203,12 +203,12 @@
             })
         );
 
-        create_dir(&base_dir_path.join("realdir")).unwrap();
-        File::create(&base_dir_path.join("realdir/realfile")).unwrap();
+        create_dir(base_dir_path.join("realdir")).unwrap();
+        File::create(base_dir_path.join("realdir/realfile")).unwrap();
         // TODO make portable
         std::os::unix::fs::symlink(
-            &base_dir_path.join("realdir"),
-            &base_dir_path.join("symlink"),
+            base_dir_path.join("realdir"),
+            base_dir_path.join("symlink"),
         )
         .unwrap();
         let path = HgPath::new(b"symlink/realfile");
--- a/rust/hg-core/tests/test_missing_ancestors.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-core/tests/test_missing_ancestors.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -26,25 +26,28 @@
         if i == 0 || rng.gen_bool(rootprob) {
             vg.push([NULL_REVISION, NULL_REVISION])
         } else if i == 1 {
-            vg.push([0, NULL_REVISION])
+            vg.push([Revision(0), NULL_REVISION])
         } else if rng.gen_bool(mergeprob) {
             let p1 = {
                 if i == 2 || rng.gen_bool(prevprob) {
-                    (i - 1) as Revision
+                    Revision((i - 1) as BaseRevision)
                 } else {
-                    rng.gen_range(0..i - 1) as Revision
+                    Revision(rng.gen_range(0..i - 1) as BaseRevision)
                 }
             };
             // p2 is a random revision lower than i and different from p1
-            let mut p2 = rng.gen_range(0..i - 1) as Revision;
+            let mut p2 = Revision(rng.gen_range(0..i - 1) as BaseRevision);
             if p2 >= p1 {
-                p2 += 1;
+                p2.0 += 1;
             }
             vg.push([p1, p2]);
         } else if rng.gen_bool(prevprob) {
-            vg.push([(i - 1) as Revision, NULL_REVISION])
+            vg.push([Revision((i - 1) as BaseRevision), NULL_REVISION])
         } else {
-            vg.push([rng.gen_range(0..i - 1) as Revision, NULL_REVISION])
+            vg.push([
+                Revision(rng.gen_range(0..i - 1) as BaseRevision),
+                NULL_REVISION,
+            ])
         }
     }
     vg
@@ -55,10 +58,10 @@
     let mut ancs: Vec<HashSet<Revision>> = Vec::new();
     (0..vg.len()).for_each(|i| {
         let mut ancs_i = HashSet::new();
-        ancs_i.insert(i as Revision);
+        ancs_i.insert(Revision(i as BaseRevision));
         for p in vg[i].iter().cloned() {
             if p != NULL_REVISION {
-                ancs_i.extend(&ancs[p as usize]);
+                ancs_i.extend(&ancs[p.0 as usize]);
             }
         }
         ancs.push(ancs_i);
@@ -115,7 +118,7 @@
             .push(MissingAncestorsAction::RemoveAncestorsFrom(revs.clone()));
         for base in self.bases.iter().cloned() {
             if base != NULL_REVISION {
-                for rev in &self.ancestors_sets[base as usize] {
+                for rev in &self.ancestors_sets[base.0 as usize] {
                     revs.remove(rev);
                 }
             }
@@ -131,7 +134,7 @@
         let mut missing: HashSet<Revision> = HashSet::new();
         for rev in revs_as_set.iter().cloned() {
             if rev != NULL_REVISION {
-                missing.extend(&self.ancestors_sets[rev as usize])
+                missing.extend(&self.ancestors_sets[rev.0 as usize])
             }
         }
         self.history
@@ -139,7 +142,7 @@
 
         for base in self.bases.iter().cloned() {
             if base != NULL_REVISION {
-                for rev in &self.ancestors_sets[base as usize] {
+                for rev in &self.ancestors_sets[base.0 as usize] {
                     missing.remove(rev);
                 }
             }
@@ -193,10 +196,10 @@
     let sigma = sigma_opt.unwrap_or(0.8);
 
     let log_normal = LogNormal::new(mu, sigma).unwrap();
-    let nb = min(maxrev as usize, log_normal.sample(rng).floor() as usize);
+    let nb = min(maxrev.0 as usize, log_normal.sample(rng).floor() as usize);
 
-    let dist = Uniform::from(NULL_REVISION..maxrev);
-    rng.sample_iter(&dist).take(nb).collect()
+    let dist = Uniform::from(NULL_REVISION.0..maxrev.0);
+    rng.sample_iter(&dist).take(nb).map(Revision).collect()
 }
 
 /// Produces the hexadecimal representation of a slice of bytes
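
The sampling change above relies on tuple-struct constructors being plain functions, so raw integers can be lifted with `.map(Revision)`. A tiny sketch of that idiom with a stand-in newtype:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Revision(i32);

fn main() {
    // A tuple-struct constructor is a plain `fn(i32) -> Revision`, so it
    // can be passed straight to `map` to lift raw integers.
    let revs: Vec<Revision> = (0..3).map(Revision).collect();
    assert_eq!(revs, vec![Revision(0), Revision(1), Revision(2)]);
}
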
@@ -294,7 +297,7 @@
             eprintln!("Tested with {} graphs", g);
         }
         let graph = build_random_graph(None, None, None, None);
-        let graph_len = graph.len() as Revision;
+        let graph_len = Revision(graph.len() as BaseRevision);
         let ancestors_sets = ancestors_sets(&graph);
         for _testno in 0..testcount {
             let bases: HashSet<Revision> =
--- a/rust/hg-cpython/src/ancestors.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/ancestors.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -35,6 +35,7 @@
 //! [`MissingAncestors`]: struct.MissingAncestors.html
 //! [`AncestorsIterator`]: struct.AncestorsIterator.html
 use crate::revlog::pyindex_to_graph;
+use crate::PyRevision;
 use crate::{
     cindex::Index, conversion::rev_pyiter_collect, exceptions::GraphError,
 };
@@ -54,16 +55,16 @@
 py_class!(pub class AncestorsIterator |py| {
     data inner: RefCell<Box<VCGAncestorsIterator<Index>>>;
 
-    def __next__(&self) -> PyResult<Option<Revision>> {
+    def __next__(&self) -> PyResult<Option<PyRevision>> {
         match self.inner(py).borrow_mut().next() {
             Some(Err(e)) => Err(GraphError::pynew_from_vcsgraph(py, e)),
             None => Ok(None),
-            Some(Ok(r)) => Ok(Some(r)),
+            Some(Ok(r)) => Ok(Some(PyRevision(r))),
         }
     }
 
-    def __contains__(&self, rev: Revision) -> PyResult<bool> {
-        self.inner(py).borrow_mut().contains(rev)
+    def __contains__(&self, rev: PyRevision) -> PyResult<bool> {
+        self.inner(py).borrow_mut().contains(rev.0)
             .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))
     }
 
@@ -71,13 +72,19 @@
         Ok(self.clone_ref(py))
     }
 
-    def __new__(_cls, index: PyObject, initrevs: PyObject, stoprev: Revision,
-                inclusive: bool) -> PyResult<AncestorsIterator> {
-        let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
+    def __new__(
+        _cls,
+        index: PyObject,
+        initrevs: PyObject,
+        stoprev: PyRevision,
+        inclusive: bool
+    ) -> PyResult<AncestorsIterator> {
+        let index = pyindex_to_graph(py, index)?;
+        let initvec: Vec<_> = rev_pyiter_collect(py, &initrevs, &index)?;
         let ait = VCGAncestorsIterator::new(
-            pyindex_to_graph(py, index)?,
-            initvec,
-            stoprev,
+            index,
+            initvec.into_iter().map(|r| r.0),
+            stoprev.0,
             inclusive,
         )
         .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))?;
@@ -98,10 +105,10 @@
 py_class!(pub class LazyAncestors |py| {
     data inner: RefCell<Box<VCGLazyAncestors<Index>>>;
 
-    def __contains__(&self, rev: Revision) -> PyResult<bool> {
+    def __contains__(&self, rev: PyRevision) -> PyResult<bool> {
         self.inner(py)
             .borrow_mut()
-            .contains(rev)
+            .contains(rev.0)
             .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))
     }
 
@@ -113,14 +120,24 @@
         Ok(!self.inner(py).borrow().is_empty())
     }
 
-    def __new__(_cls, index: PyObject, initrevs: PyObject, stoprev: Revision,
-                inclusive: bool) -> PyResult<Self> {
-        let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
+    def __new__(
+        _cls,
+        index: PyObject,
+        initrevs: PyObject,
+        stoprev: PyRevision,
+        inclusive: bool
+    ) -> PyResult<Self> {
+        let index = pyindex_to_graph(py, index)?;
+        let initvec: Vec<_> = rev_pyiter_collect(py, &initrevs, &index)?;
 
         let lazy =
-            VCGLazyAncestors::new(pyindex_to_graph(py, index)?,
-                          initvec, stoprev, inclusive)
-                .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))?;
+            VCGLazyAncestors::new(
+                index,
+                initvec.into_iter().map(|r| r.0),
+                stoprev.0,
+                inclusive
+            )
+            .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))?;
 
         Self::create_instance(py, RefCell::new(Box::new(lazy)))
         }
@@ -129,6 +146,7 @@
 
 py_class!(pub class MissingAncestors |py| {
     data inner: RefCell<Box<CoreMissing<Index>>>;
+    data index: RefCell<Index>;
 
     def __new__(
         _cls,
@@ -136,9 +154,15 @@
         bases: PyObject
     )
     -> PyResult<MissingAncestors> {
-        let bases_vec: Vec<Revision> = rev_pyiter_collect(py, &bases)?;
-        let inner = CoreMissing::new(pyindex_to_graph(py, index)?, bases_vec);
-        MissingAncestors::create_instance(py, RefCell::new(Box::new(inner)))
+        let index = pyindex_to_graph(py, index)?;
+        let bases_vec: Vec<_> = rev_pyiter_collect(py, &bases, &index)?;
+
+        let inner = CoreMissing::new(index.clone_ref(py), bases_vec);
+        MissingAncestors::create_instance(
+            py,
+            RefCell::new(Box::new(inner)),
+            RefCell::new(index)
+        )
     }
 
     def hasbases(&self) -> PyResult<bool> {
@@ -146,8 +170,9 @@
     }
 
     def addbases(&self, bases: PyObject) -> PyResult<PyObject> {
+        let index = self.index(py).borrow();
+        let bases_vec: Vec<_> = rev_pyiter_collect(py, &bases, &*index)?;
         let mut inner = self.inner(py).borrow_mut();
-        let bases_vec: Vec<Revision> = rev_pyiter_collect(py, &bases)?;
         inner.add_bases(bases_vec);
         // cpython doc has examples with PyResult<()> but this gives me
         //   the trait `cpython::ToPyObject` is not implemented for `()`
@@ -155,17 +180,31 @@
         Ok(py.None())
     }
 
-    def bases(&self) -> PyResult<HashSet<Revision>> {
-        Ok(self.inner(py).borrow().get_bases().clone())
+    def bases(&self) -> PyResult<HashSet<PyRevision>> {
+        Ok(
+            self.inner(py)
+                .borrow()
+                .get_bases()
+                .iter()
+                .map(|r| PyRevision(r.0))
+                .collect()
+        )
     }
 
-    def basesheads(&self) -> PyResult<HashSet<Revision>> {
+    def basesheads(&self) -> PyResult<HashSet<PyRevision>> {
         let inner = self.inner(py).borrow();
-        inner.bases_heads().map_err(|e| GraphError::pynew(py, e))
+        Ok(
+            inner
+                .bases_heads()
+                .map_err(|e| GraphError::pynew(py, e))?
+                .into_iter()
+                .map(|r| PyRevision(r.0))
+                .collect()
+        )
     }
 
     def removeancestorsfrom(&self, revs: PyObject) -> PyResult<PyObject> {
-        let mut inner = self.inner(py).borrow_mut();
+        let index = self.index(py).borrow();
         // this is very lame: we convert to a Rust set, update it in place
         // and then convert back to Python, only to have Python remove the
         // excess (thankfully, Python is happy with a list or even an iterator)
@@ -174,7 +213,10 @@
         //    discard
         //  - define a trait for sets of revisions in the core and implement
         //    it for a Python set rewrapped with the GIL marker
-        let mut revs_pyset: HashSet<Revision> = rev_pyiter_collect(py, &revs)?;
+        let mut revs_pyset: HashSet<Revision> = rev_pyiter_collect(
+            py, &revs, &*index
+        )?;
+        let mut inner = self.inner(py).borrow_mut();
         inner.remove_ancestors_from(&mut revs_pyset)
             .map_err(|e| GraphError::pynew(py, e))?;
 
@@ -182,15 +224,19 @@
         let mut remaining_pyint_vec: Vec<PyObject> = Vec::with_capacity(
             revs_pyset.len());
         for rev in revs_pyset {
-            remaining_pyint_vec.push(rev.to_py_object(py).into_object());
+            remaining_pyint_vec.push(
+                PyRevision(rev.0).to_py_object(py).into_object()
+            );
         }
         let remaining_pylist = PyList::new(py, remaining_pyint_vec.as_slice());
         revs.call_method(py, "intersection_update", (remaining_pylist, ), None)
     }
 
     def missingancestors(&self, revs: PyObject) -> PyResult<PyList> {
+        let index = self.index(py).borrow();
+        let revs_vec: Vec<Revision> = rev_pyiter_collect(py, &revs, &*index)?;
+
         let mut inner = self.inner(py).borrow_mut();
-        let revs_vec: Vec<Revision> = rev_pyiter_collect(py, &revs)?;
         let missing_vec = match inner.missing_ancestors(revs_vec) {
             Ok(missing) => missing,
             Err(e) => {
@@ -201,7 +247,9 @@
         let mut missing_pyint_vec: Vec<PyObject> = Vec::with_capacity(
             missing_vec.len());
         for rev in missing_vec {
-            missing_pyint_vec.push(rev.to_py_object(py).into_object());
+            missing_pyint_vec.push(
+                PyRevision(rev.0).to_py_object(py).into_object()
+            );
         }
         Ok(PyList::new(py, missing_pyint_vec.as_slice()))
     }
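
`bases()` and `basesheads()` now re-wrap each checked core revision into `PyRevision` on the way out to Python. A stand-in sketch of that outbound conversion (simplified newtypes, no cpython dependency):

use std::collections::HashSet;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Revision(i32);

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct PyRevision(i32);

// Checked core revisions are re-wrapped one by one for the Python side.
fn to_python_set(core: &HashSet<Revision>) -> HashSet<PyRevision> {
    core.iter().map(|r| PyRevision(r.0)).collect()
}

fn main() {
    let core: HashSet<Revision> = [Revision(1), Revision(2)].into();
    assert!(to_python_set(&core).contains(&PyRevision(2)));
}
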
--- a/rust/hg-cpython/src/cindex.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/cindex.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -15,7 +15,7 @@
     PyObject, PyResult, PyTuple, Python, PythonObject,
 };
 use hg::revlog::{Node, RevlogIndex};
-use hg::{Graph, GraphError, Revision, WORKING_DIRECTORY_REVISION};
+use hg::{BaseRevision, Graph, GraphError, Revision};
 use libc::{c_int, ssize_t};
 
 const REVLOG_CABI_VERSION: c_int = 3;
@@ -141,19 +141,16 @@
 impl Graph for Index {
     /// wrap a call to the C extern parents function
     fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
-        if rev == WORKING_DIRECTORY_REVISION {
-            return Err(GraphError::WorkingDirectoryUnsupported);
-        }
         let mut res: [c_int; 2] = [0; 2];
         let code = unsafe {
             (self.capi.index_parents)(
                 self.index.as_ptr(),
-                rev as c_int,
+                rev.0 as c_int,
                 &mut res as *mut [c_int; 2],
             )
         };
         match code {
-            0 => Ok(res),
+            0 => Ok([Revision(res[0]), Revision(res[1])]),
             _ => Err(GraphError::ParentOutOfRange(rev)),
         }
     }
@@ -162,17 +159,18 @@
 impl vcsgraph::graph::Graph for Index {
     fn parents(
         &self,
-        rev: Revision,
+        rev: BaseRevision,
     ) -> Result<vcsgraph::graph::Parents, vcsgraph::graph::GraphReadError>
     {
-        match Graph::parents(self, rev) {
-            Ok(parents) => Ok(vcsgraph::graph::Parents(parents)),
+        // FIXME This trait should be reworked to decide between Revision
+        // and UncheckedRevision, get better error names, etc.
+        match Graph::parents(self, Revision(rev)) {
+            Ok(parents) => {
+                Ok(vcsgraph::graph::Parents([parents[0].0, parents[1].0]))
+            }
             Err(GraphError::ParentOutOfRange(rev)) => {
-                Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev))
+                Err(vcsgraph::graph::GraphReadError::KeyedInvalidKey(rev.0))
             }
-            Err(GraphError::WorkingDirectoryUnsupported) => Err(
-                vcsgraph::graph::GraphReadError::WorkingDirectoryUnsupported,
-            ),
         }
     }
 }
@@ -180,7 +178,7 @@
 impl vcsgraph::graph::RankedGraph for Index {
     fn rank(
         &self,
-        rev: Revision,
+        rev: BaseRevision,
     ) -> Result<vcsgraph::graph::Rank, vcsgraph::graph::GraphReadError> {
         match unsafe {
             (self.capi.fast_rank)(self.index.as_ptr(), rev as ssize_t)
@@ -200,7 +198,7 @@
 
     fn node(&self, rev: Revision) -> Option<&Node> {
         let raw = unsafe {
-            (self.capi.index_node)(self.index.as_ptr(), rev as ssize_t)
+            (self.capi.index_node)(self.index.as_ptr(), rev.0 as ssize_t)
         };
         if raw.is_null() {
             None
--- a/rust/hg-cpython/src/conversion.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/conversion.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -8,8 +8,10 @@
 //! Bindings for the hg::ancestors module provided by the
 //! `hg-core` crate. From Python, this will be seen as `rustext.ancestor`
 
-use cpython::{ObjectProtocol, PyObject, PyResult, Python};
-use hg::Revision;
+use cpython::{ObjectProtocol, PyErr, PyObject, PyResult, Python};
+use hg::{Revision, RevlogIndex, UncheckedRevision};
+
+use crate::{exceptions::GraphError, PyRevision};
 
 /// Utility function to convert a Python iterable into various collections
 ///
@@ -17,11 +19,28 @@
 /// with `impl IntoIterator<Item=Revision>` arguments, because
 /// a `PyErr` can arise at each step of iteration, whereas these methods
 /// expect iterables over `Revision`, not over some `Result<Revision, PyErr>`
-pub fn rev_pyiter_collect<C>(py: Python, revs: &PyObject) -> PyResult<C>
+pub fn rev_pyiter_collect<C, I>(
+    py: Python,
+    revs: &PyObject,
+    index: &I,
+) -> PyResult<C>
 where
     C: FromIterator<Revision>,
+    I: RevlogIndex,
 {
     revs.iter(py)?
-        .map(|r| r.and_then(|o| o.extract::<Revision>(py)))
+        .map(|r| {
+            r.and_then(|o| match o.extract::<PyRevision>(py) {
+                Ok(r) => index
+                    .check_revision(UncheckedRevision(r.0))
+                    .ok_or_else(|| {
+                        PyErr::new::<GraphError, _>(
+                            py,
+                            ("InvalidRevision", r.0),
+                        )
+                    }),
+                Err(e) => Err(e),
+            })
+        })
         .collect()
 }
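
`rev_pyiter_collect` now validates every incoming value against the index while collecting, failing on the first invalid item. A plain-Rust sketch of that validate-while-collecting shape under stand-in types (no Python involved):

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Revision(i32);

// Each item is checked against the index while collecting; the whole
// collection fails on the first invalid item, like the Python binding.
fn collect_checked<C>(raw: &[i32], index_len: i32) -> Result<C, String>
where
    C: FromIterator<Revision>,
{
    raw.iter()
        .map(|&r| {
            if r >= 0 && r < index_len {
                Ok(Revision(r))
            } else {
                Err(format!("InvalidRevision: {}", r))
            }
        })
        .collect()
}

fn main() {
    let ok: Result<Vec<Revision>, String> = collect_checked(&[0, 1, 2], 3);
    assert!(ok.is_ok());
    let bad: Result<Vec<Revision>, String> = collect_checked(&[0, 5], 3);
    assert!(bad.is_err());
}
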
--- a/rust/hg-cpython/src/copy_tracing.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/copy_tracing.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -14,6 +14,7 @@
 use hg::Revision;
 
 use crate::pybytes_deref::PyBytesDeref;
+use crate::PyRevision;
 
 /// Combines copies information contained into revision `revs` to build a copy
 /// map.
@@ -23,14 +24,17 @@
     py: Python,
     revs: PyList,
     children_count: PyDict,
-    target_rev: Revision,
+    target_rev: PyRevision,
     rev_info: PyObject,
     multi_thread: bool,
 ) -> PyResult<PyDict> {
+    let target_rev = Revision(target_rev.0);
     let children_count = children_count
         .items(py)
         .iter()
-        .map(|(k, v)| Ok((k.extract(py)?, v.extract(py)?)))
+        .map(|(k, v)| {
+            Ok((Revision(k.extract::<PyRevision>(py)?.0), v.extract(py)?))
+        })
         .collect::<PyResult<_>>()?;
 
     /// (Revision number, parent 1, parent 2, copy data for this revision)
@@ -38,11 +42,13 @@
 
     let revs_info =
         revs.iter(py).map(|rev_py| -> PyResult<RevInfo<PyBytes>> {
-            let rev = rev_py.extract(py)?;
+            let rev = Revision(rev_py.extract::<PyRevision>(py)?.0);
             let tuple: PyTuple =
                 rev_info.call(py, (rev_py,), None)?.cast_into(py)?;
-            let p1 = tuple.get_item(py, 0).extract(py)?;
-            let p2 = tuple.get_item(py, 1).extract(py)?;
+            let p1 =
+                Revision(tuple.get_item(py, 0).extract::<PyRevision>(py)?.0);
+            let p2 =
+                Revision(tuple.get_item(py, 1).extract::<PyRevision>(py)?.0);
             let opt_bytes = tuple.get_item(py, 2).extract(py)?;
             Ok((rev, p1, p2, opt_bytes))
         });
@@ -179,7 +185,7 @@
             combine_changeset_copies_wrapper(
                 revs: PyList,
                 children: PyDict,
-                target_rev: Revision,
+                target_rev: PyRevision,
                 rev_info: PyObject,
                 multi_thread: bool
             )
--- a/rust/hg-cpython/src/dagops.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/dagops.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -9,6 +9,7 @@
 //! `hg-core` package.
 //!
 //! From Python, this will be seen as `mercurial.rustext.dagop`
+use crate::PyRevision;
 use crate::{conversion::rev_pyiter_collect, exceptions::GraphError};
 use cpython::{PyDict, PyModule, PyObject, PyResult, Python};
 use hg::dagops;
@@ -26,11 +27,12 @@
     py: Python,
     index: PyObject,
     revs: PyObject,
-) -> PyResult<HashSet<Revision>> {
-    let mut as_set: HashSet<Revision> = rev_pyiter_collect(py, &revs)?;
-    dagops::retain_heads(&pyindex_to_graph(py, index)?, &mut as_set)
+) -> PyResult<HashSet<PyRevision>> {
+    let index = pyindex_to_graph(py, index)?;
+    let mut as_set: HashSet<Revision> = rev_pyiter_collect(py, &revs, &index)?;
+    dagops::retain_heads(&index, &mut as_set)
         .map_err(|e| GraphError::pynew(py, e))?;
-    Ok(as_set)
+    Ok(as_set.into_iter().map(Into::into).collect())
 }
 
 /// Computes the rank, i.e. the number of ancestors including itself,
@@ -38,10 +40,10 @@
 pub fn rank(
     py: Python,
     index: PyObject,
-    p1r: Revision,
-    p2r: Revision,
+    p1r: PyRevision,
+    p2r: PyRevision,
 ) -> PyResult<Rank> {
-    node_rank(&pyindex_to_graph(py, index)?, &Parents([p1r, p2r]))
+    node_rank(&pyindex_to_graph(py, index)?, &Parents([p1r.0, p2r.0]))
         .map_err(|e| GraphError::pynew_from_vcsgraph(py, e))
 }
 
@@ -59,7 +61,7 @@
     m.add(
         py,
         "rank",
-        py_fn!(py, rank(index: PyObject, p1r: Revision, p2r: Revision)),
+        py_fn!(py, rank(index: PyObject, p1r: PyRevision, p2r: PyRevision)),
     )?;
 
     let sys = PyModule::import(py, "sys")?;
--- a/rust/hg-cpython/src/dirstate/status.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/dirstate/status.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -273,7 +273,7 @@
                 py_warnings.append(
                     py,
                     (
-                        PyBytes::new(py, &get_bytes_from_path(&file)),
+                        PyBytes::new(py, &get_bytes_from_path(file)),
                         PyBytes::new(py, syn),
                     )
                         .to_py_object(py)
@@ -282,7 +282,7 @@
             }
             PatternFileWarning::NoSuchFile(file) => py_warnings.append(
                 py,
-                PyBytes::new(py, &get_bytes_from_path(&file)).into_object(),
+                PyBytes::new(py, &get_bytes_from_path(file)).into_object(),
             ),
         }
     }
--- a/rust/hg-cpython/src/discovery.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/discovery.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -12,12 +12,13 @@
 //! - [`PartialDiscover`] is the Rust implementation of
 //!   `mercurial.setdiscovery.partialdiscovery`.
 
+use crate::PyRevision;
 use crate::{
     cindex::Index, conversion::rev_pyiter_collect, exceptions::GraphError,
 };
 use cpython::{
-    ObjectProtocol, PyDict, PyModule, PyObject, PyResult, PyTuple, Python,
-    PythonObject, ToPyObject,
+    ObjectProtocol, PyClone, PyDict, PyModule, PyObject, PyResult, PyTuple,
+    Python, PythonObject, ToPyObject,
 };
 use hg::discovery::PartialDiscovery as CorePartialDiscovery;
 use hg::Revision;
@@ -29,6 +30,7 @@
 
 py_class!(pub class PartialDiscovery |py| {
     data inner: RefCell<Box<CorePartialDiscovery<Index>>>;
+    data index: RefCell<Index>;
 
     // `_respectsize` is currently only here to replicate the Python API and
     // will be used in future patches inside methods that are yet to be
@@ -41,28 +43,33 @@
         randomize: bool = true
     ) -> PyResult<PartialDiscovery> {
         let index = repo.getattr(py, "changelog")?.getattr(py, "index")?;
+        let index = pyindex_to_graph(py, index)?;
+        let target_heads = rev_pyiter_collect(py, &targetheads, &index)?;
         Self::create_instance(
             py,
             RefCell::new(Box::new(CorePartialDiscovery::new(
-                pyindex_to_graph(py, index)?,
-                rev_pyiter_collect(py, &targetheads)?,
+                index.clone_ref(py),
+                target_heads,
                 respectsize,
                 randomize,
-            )))
+            ))),
+            RefCell::new(index),
         )
     }
 
     def addcommons(&self, commons: PyObject) -> PyResult<PyObject> {
+        let index = self.index(py).borrow();
+        let commons_vec: Vec<_> = rev_pyiter_collect(py, &commons, &*index)?;
         let mut inner = self.inner(py).borrow_mut();
-        let commons_vec: Vec<Revision> = rev_pyiter_collect(py, &commons)?;
         inner.add_common_revisions(commons_vec)
-            .map_err(|e| GraphError::pynew(py, e))?;
-        Ok(py.None())
-    }
+            .map_err(|e| GraphError::pynew(py, e))?;
+        Ok(py.None())
+    }
 
     def addmissings(&self, missings: PyObject) -> PyResult<PyObject> {
+        let index = self.index(py).borrow();
+        let missings_vec: Vec<_> = rev_pyiter_collect(py, &missings, &*index)?;
         let mut inner = self.inner(py).borrow_mut();
-        let missings_vec: Vec<Revision> = rev_pyiter_collect(py, &missings)?;
         inner.add_missing_revisions(missings_vec)
             .map_err(|e| GraphError::pynew(py, e))?;
         Ok(py.None())
@@ -73,7 +80,10 @@
         let mut common: Vec<Revision> = Vec::new();
         for info in sample.iter(py)? { // info is a pair (Revision, bool)
             let mut revknown = info?.iter(py)?;
-            let rev: Revision = revknown.next().unwrap()?.extract(py)?;
+            let rev: PyRevision = revknown.next().unwrap()?.extract(py)?;
+            // This is fine since we're just using revisions as integers
+            // for the purposes of discovery
+            let rev = Revision(rev.0);
             let known: bool = revknown.next().unwrap()?.extract(py)?;
             if known {
                 common.push(rev);
@@ -107,9 +117,10 @@
         Ok(as_dict)
     }
 
-    def commonheads(&self) -> PyResult<HashSet<Revision>> {
-        self.inner(py).borrow().common_heads()
-            .map_err(|e| GraphError::pynew(py, e))
+    def commonheads(&self) -> PyResult<HashSet<PyRevision>> {
+        let res = self.inner(py).borrow().common_heads()
+                    .map_err(|e| GraphError::pynew(py, e))?;
+        Ok(res.into_iter().map(Into::into).collect())
     }
 
     def takefullsample(&self, _headrevs: PyObject,
@@ -119,20 +130,21 @@
             .map_err(|e| GraphError::pynew(py, e))?;
         let as_vec: Vec<PyObject> = sample
             .iter()
-            .map(|rev| rev.to_py_object(py).into_object())
+            .map(|rev| PyRevision(rev.0).to_py_object(py).into_object())
             .collect();
         Ok(PyTuple::new(py, as_vec.as_slice()).into_object())
     }
 
     def takequicksample(&self, headrevs: PyObject,
                         size: usize) -> PyResult<PyObject> {
+        let index = self.index(py).borrow();
         let mut inner = self.inner(py).borrow_mut();
-        let revsvec: Vec<Revision> = rev_pyiter_collect(py, &headrevs)?;
+        let revsvec: Vec<_> = rev_pyiter_collect(py, &headrevs, &*index)?;
         let sample = inner.take_quick_sample(revsvec, size)
             .map_err(|e| GraphError::pynew(py, e))?;
         let as_vec: Vec<PyObject> = sample
             .iter()
-            .map(|rev| rev.to_py_object(py).into_object())
+            .map(|rev| PyRevision(rev.0).to_py_object(py).into_object())
             .collect();
         Ok(PyTuple::new(py, as_vec.as_slice()).into_object())
     }
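
Storing the index next to the inner discovery object lets each method validate incoming revisions before mutating state; note the borrow ordering (shared borrow of the index before the mutable borrow of `inner`). A stand-in sketch using `RefCell` the way the `py_class!` data slots do:

use std::cell::RefCell;

struct Index {
    len: i32,
}

struct Inner {
    commons: Vec<i32>,
}

struct PartialDiscovery {
    inner: RefCell<Inner>,
    index: RefCell<Index>,
}

impl PartialDiscovery {
    fn addcommons(&self, raw: &[i32]) -> Result<(), String> {
        // Shared borrow of the index first, for validation...
        let index = self.index.borrow();
        let checked: Vec<i32> = raw
            .iter()
            .map(|&r| {
                if r >= 0 && r < index.len {
                    Ok(r)
                } else {
                    Err(format!("InvalidRevision: {}", r))
                }
            })
            .collect::<Result<_, _>>()?;
        // ...then the mutable borrow of the inner state, as in the
        // reordered methods above.
        self.inner.borrow_mut().commons.extend(checked);
        Ok(())
    }
}

fn main() {
    let pd = PartialDiscovery {
        inner: RefCell::new(Inner { commons: vec![] }),
        index: RefCell::new(Index { len: 5 }),
    };
    assert!(pd.addcommons(&[0, 3]).is_ok());
    assert!(pd.addcommons(&[9]).is_err());
}
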
--- a/rust/hg-cpython/src/exceptions.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/exceptions.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -18,22 +18,15 @@
 };
 use hg;
 
+use crate::PyRevision;
+
 py_exception!(rustext, GraphError, ValueError);
 
 impl GraphError {
     pub fn pynew(py: Python, inner: hg::GraphError) -> PyErr {
         match inner {
             hg::GraphError::ParentOutOfRange(r) => {
-                GraphError::new(py, ("ParentOutOfRange", r))
-            }
-            hg::GraphError::WorkingDirectoryUnsupported => {
-                match py
-                    .import("mercurial.error")
-                    .and_then(|m| m.get(py, "WdirUnsupported"))
-                {
-                    Err(e) => e,
-                    Ok(cls) => PyErr::from_instance(py, cls),
-                }
+                GraphError::new(py, ("ParentOutOfRange", PyRevision(r.0)))
             }
         }
     }
--- a/rust/hg-cpython/src/lib.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/lib.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -24,6 +24,9 @@
 #![allow(clippy::manual_strip)] // rust-cpython macros
 #![allow(clippy::type_complexity)] // rust-cpython macros
 
+use cpython::{FromPyObject, PyInt, Python, ToPyObject};
+use hg::{BaseRevision, Revision};
+
 /// This crate uses nested private macros, `extern crate` is still needed in
 /// 2018 edition.
 #[macro_use]
@@ -44,6 +47,40 @@
 pub mod revlog;
 pub mod utils;
 
+/// Revision as exposed to/from the Python layer.
+///
+/// We need this indirection because of the orphan rule, meaning we can't
+/// implement a foreign trait (like [`cpython::ToPyObject`])
+/// for a foreign type (like [`hg::UncheckedRevision`]).
+///
+/// This also acts as a deterrent against blindly trusting Python to send
+/// us valid revision numbers.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct PyRevision(BaseRevision);
+
+impl From<Revision> for PyRevision {
+    fn from(r: Revision) -> Self {
+        PyRevision(r.0)
+    }
+}
+
+impl<'s> FromPyObject<'s> for PyRevision {
+    fn extract(
+        py: Python,
+        obj: &'s cpython::PyObject,
+    ) -> cpython::PyResult<Self> {
+        Ok(Self(obj.extract::<BaseRevision>(py)?))
+    }
+}
+
+impl ToPyObject for PyRevision {
+    type ObjectType = PyInt;
+
+    fn to_py_object(&self, py: Python) -> Self::ObjectType {
+        self.0.to_py_object(py)
+    }
+}
+
 py_module_initializer!(rustext, initrustext, PyInit_rustext, |py, m| {
     m.add(
         py,
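
A small illustration of the `PyRevision` boundary introduced above: conversion out of a checked `Revision` is infallible, while the inward direction is deliberately left without a `From` impl. Stand-in definitions, no cpython dependency:

type BaseRevision = i32;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Revision(BaseRevision);

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct PyRevision(BaseRevision);

impl From<Revision> for PyRevision {
    fn from(r: Revision) -> Self {
        PyRevision(r.0)
    }
}

fn main() {
    // Outbound: a checked core revision converts infallibly.
    let py: PyRevision = Revision(42).into();
    assert_eq!(py, PyRevision(42));
    // Inbound: there is deliberately no `From<PyRevision> for Revision`;
    // that direction must go through a validation step against an index.
}
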
--- a/rust/hg-cpython/src/revlog.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/hg-cpython/src/revlog.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -8,6 +8,7 @@
 use crate::{
     cindex,
     utils::{node_from_py_bytes, node_from_py_object},
+    PyRevision,
 };
 use cpython::{
     buffer::{Element, PyBuffer},
@@ -18,7 +19,7 @@
 use hg::{
     nodemap::{Block, NodeMapError, NodeTree},
     revlog::{nodemap::NodeMap, NodePrefix, RevlogIndex},
-    Revision,
+    BaseRevision, Revision, UncheckedRevision,
 };
 use std::cell::RefCell;
 
@@ -59,12 +60,13 @@
 
     /// Return Revision if found; raises a bare `error.RevlogError`
     /// in case of ambiguity, same as the C version does
-    def get_rev(&self, node: PyBytes) -> PyResult<Option<Revision>> {
+    def get_rev(&self, node: PyBytes) -> PyResult<Option<PyRevision>> {
         let opt = self.get_nodetree(py)?.borrow();
         let nt = opt.as_ref().unwrap();
         let idx = &*self.cindex(py).borrow();
         let node = node_from_py_bytes(py, &node)?;
-        nt.find_bin(idx, node.into()).map_err(|e| nodemap_error(py, e))
+        let res = nt.find_bin(idx, node.into());
+        Ok(res.map_err(|e| nodemap_error(py, e))?.map(Into::into))
     }
 
     /// same as `get_rev()` but raises a bare `error.RevlogError` if node
@@ -72,7 +74,7 @@
     ///
     /// No need to repeat `node` in the exception, `mercurial/revlog.py`
     /// will catch and rewrap with it
-    def rev(&self, node: PyBytes) -> PyResult<Revision> {
+    def rev(&self, node: PyBytes) -> PyResult<PyRevision> {
         self.get_rev(py, node)?.ok_or_else(|| revlog_error(py))
     }
 
@@ -131,9 +133,11 @@
         let node = node_from_py_object(py, &node_bytes)?;
 
         let mut idx = self.cindex(py).borrow_mut();
-        let rev = idx.len() as Revision;
 
+        // This is ok since we will just add the revision to the index
+        let rev = Revision(idx.len() as BaseRevision);
         idx.append(py, tup)?;
+
         self.get_nodetree(py)?.borrow_mut().as_mut().unwrap()
             .insert(&*idx, &node, rev)
             .map_err(|e| nodemap_error(py, e))?;
@@ -252,7 +256,7 @@
         // Note that we don't seem to have a direct way to call
         // PySequence_GetItem (does the job), which would possibly be better
         // for performance
-        let key = match key.extract::<Revision>(py) {
+        let key = match key.extract::<i32>(py) {
             Ok(rev) => rev.to_py_object(py).into_object(),
             Err(_) => key,
         };
@@ -268,9 +272,9 @@
         // this is an equivalent implementation of the index_contains()
         // defined in revlog.c
         let cindex = self.cindex(py).borrow();
-        match item.extract::<Revision>(py) {
+        match item.extract::<i32>(py) {
             Ok(rev) => {
-                Ok(rev >= -1 && rev < cindex.inner().len(py)? as Revision)
+                Ok(rev >= -1 && rev < cindex.inner().len(py)? as BaseRevision)
             }
             Err(_) => {
                 cindex.inner().call_method(
@@ -331,7 +335,7 @@
     ) -> PyResult<PyObject> {
         let index = self.cindex(py).borrow();
         for r in 0..index.len() {
-            let rev = r as Revision;
+            let rev = Revision(r as BaseRevision);
             // in this case node() won't ever return None
             nt.insert(&*index, index.node(rev).unwrap(), rev)
                 .map_err(|e| nodemap_error(py, e))?
@@ -344,7 +348,7 @@
         py: Python<'a>,
     ) -> PyResult<&'a RefCell<Option<NodeTree>>> {
         if self.nt(py).borrow().is_none() {
-            let readonly = Box::new(Vec::new());
+            let readonly = Box::<Vec<_>>::default();
             let mut nt = NodeTree::load_bytes(readonly, 0);
             self.fill_nodemap(py, &mut nt)?;
             self.nt(py).borrow_mut().replace(nt);
@@ -378,7 +382,7 @@
         // If there's anything readonly, we need to build the data again from
         // scratch
         let bytes = if readonly.len() > 0 {
-            let mut nt = NodeTree::load_bytes(Box::new(vec![]), 0);
+            let mut nt = NodeTree::load_bytes(Box::<Vec<_>>::default(), 0);
             self.fill_nodemap(py, &mut nt)?;
 
             let (readonly, bytes) = nt.into_readonly_and_added_bytes();
@@ -447,14 +451,19 @@
 
         let mut nt = NodeTree::load_bytes(Box::new(bytes), len);
 
-        let data_tip =
-            docket.getattr(py, "tip_rev")?.extract::<Revision>(py)?;
+        let data_tip = docket
+            .getattr(py, "tip_rev")?
+            .extract::<BaseRevision>(py)?
+            .into();
         self.docket(py).borrow_mut().replace(docket.clone_ref(py));
         let idx = self.cindex(py).borrow();
+        let data_tip = idx.check_revision(data_tip).ok_or_else(|| {
+            nodemap_error(py, NodeMapError::RevisionNotInIndex(data_tip))
+        })?;
         let current_tip = idx.len();
 
-        for r in (data_tip + 1)..current_tip as Revision {
-            let rev = r as Revision;
+        for r in (data_tip.0 + 1)..current_tip as BaseRevision {
+            let rev = Revision(r);
             // in this case node() won't ever return None
             nt.insert(&*idx, idx.node(rev).unwrap(), rev)
                 .map_err(|e| nodemap_error(py, e))?
@@ -479,7 +488,7 @@
     }
 }
 
-fn rev_not_in_index(py: Python, rev: Revision) -> PyErr {
+fn rev_not_in_index(py: Python, rev: UncheckedRevision) -> PyErr {
     PyErr::new::<ValueError, _>(
         py,
         format!(
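
The docket catch-up above validates the persisted tip once via `check_revision`, then walks the remaining raw integers and re-wraps each. A simplified sketch with assumed types and bounds:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Revision(i32);

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct UncheckedRevision(i32);

struct Index {
    len: i32,
}

impl Index {
    // Stand-in for the `check_revision` bounds check.
    fn check_revision(&self, rev: UncheckedRevision) -> Option<Revision> {
        (rev.0 >= -1 && rev.0 < self.len).then(|| Revision(rev.0))
    }
}

// Validate the persisted tip once, then re-wrap the remaining integers.
fn catch_up(idx: &Index, tip: UncheckedRevision) -> Option<Vec<Revision>> {
    let tip = idx.check_revision(tip)?;
    Some(((tip.0 + 1)..idx.len).map(Revision).collect())
}

fn main() {
    let idx = Index { len: 5 };
    assert_eq!(
        catch_up(&idx, UncheckedRevision(2)),
        Some(vec![Revision(3), Revision(4)])
    );
    assert_eq!(catch_up(&idx, UncheckedRevision(7)), None);
}
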
--- a/rust/rhg/src/blackbox.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/blackbox.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -7,12 +7,6 @@
 use hg::utils::{files::get_bytes_from_os_str, shell_quote};
 use std::ffi::OsString;
 
-const ONE_MEBIBYTE: u64 = 1 << 20;
-
-// TODO: somehow keep defaults in sync with `configitem` in `hgext/blackbox.py`
-const DEFAULT_MAX_SIZE: u64 = ONE_MEBIBYTE;
-const DEFAULT_MAX_FILES: u32 = 7;
-
 // Python does not support %.3f, only %f
 const DEFAULT_DATE_FORMAT: &str = "%Y-%m-%d %H:%M:%S%.3f";
 
@@ -53,8 +47,7 @@
         process_start_time: &'a ProcessStartTime,
     ) -> Result<Self, HgError> {
         let configured = if let Ok(repo) = invocation.repo {
-            if invocation.config.get(b"extensions", b"blackbox").is_none() {
-                // The extension is not enabled
+            if !invocation.config.is_extension_enabled(b"blackbox") {
                 None
             } else {
                 Some(ConfiguredBlackbox {
@@ -62,15 +55,28 @@
                     max_size: invocation
                         .config
                         .get_byte_size(b"blackbox", b"maxsize")?
-                        .unwrap_or(DEFAULT_MAX_SIZE),
+                        .expect(
+                            "blackbox.maxsize should have a default value",
+                        ),
                     max_files: invocation
                         .config
                         .get_u32(b"blackbox", b"maxfiles")?
-                        .unwrap_or(DEFAULT_MAX_FILES),
+                        .expect(
+                            "blackbox.maxfiles should have a default value",
+                        ),
                     date_format: invocation
                         .config
                         .get_str(b"blackbox", b"date-format")?
-                        .unwrap_or(DEFAULT_DATE_FORMAT),
+                        .map(|f| {
+                            if f.is_empty() {
+                                DEFAULT_DATE_FORMAT
+                            } else {
+                                f
+                            }
+                        })
+                        .expect(
+                            "blackbox.date-format should have a default value",
+                        ),
                 })
             }
         } else {
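
The blackbox change drops local fallback constants in favor of defaults registered with the config layer, so lookups can `expect` a value. A toy sketch of that pattern (hypothetical `Config` type, not the hg-core API):

use std::collections::HashMap;

struct Config {
    values: HashMap<&'static str, u64>,
}

impl Config {
    fn get_u64(&self, key: &str) -> Option<u64> {
        self.values.get(key).copied()
    }
}

fn main() {
    // The default is registered centrally, so it is always present and
    // the per-call-site fallback constant can go away.
    let mut values = HashMap::new();
    values.insert("blackbox.maxsize", 1u64 << 20);
    let config = Config { values };
    let max_size = config
        .get_u64("blackbox.maxsize")
        .expect("blackbox.maxsize should have a default value");
    assert_eq!(max_size, 1 << 20);
}
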
--- a/rust/rhg/src/commands/cat.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/commands/cat.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -32,9 +32,8 @@
 
 #[logging_timer::time("trace")]
 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
-    let cat_enabled_default = true;
-    let cat_enabled = invocation.config.get_option(b"rhg", b"cat")?;
-    if !cat_enabled.unwrap_or(cat_enabled_default) {
+    let cat_enabled = invocation.config.get_bool(b"rhg", b"cat")?;
+    if !cat_enabled {
         return Err(CommandError::unsupported(
             "cat is disabled in rhg (enable it with 'rhg.cat = true' \
             or enable fallback with 'rhg.on-unsupported = fallback')",
@@ -63,7 +62,7 @@
             return Err(CommandError::unsupported(message));
         }
 
-        let normalized = cwd.join(&file);
+        let normalized = cwd.join(file);
         // TODO: actually normalize `..` path segments etc?
         let dotted = normalized.components().any(|c| c.as_os_str() == "..");
         if file.as_bytes() == b"." || dotted {
--- a/rust/rhg/src/commands/debugdata.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/commands/debugdata.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -23,7 +23,7 @@
         )
         .group(
             ArgGroup::new("revlog")
-                .args(&["changelog", "manifest"])
+                .args(["changelog", "manifest"])
                 .required(true),
         )
         .arg(
--- a/rust/rhg/src/commands/files.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/commands/files.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -4,9 +4,12 @@
 };
 use crate::utils::path_utils::RelativizePaths;
 use clap::Arg;
+use hg::filepatterns::parse_pattern_args;
+use hg::matchers::IntersectionMatcher;
 use hg::narrow;
 use hg::operations::list_rev_tracked_files;
 use hg::repo::Repo;
+use hg::utils::files::get_bytes_from_os_str;
 use hg::utils::filter_map_results;
 use hg::utils::hg_path::HgPath;
 use rayon::prelude::*;
@@ -26,6 +29,12 @@
                 .long("revision")
                 .value_name("REV"),
         )
+        .arg(
+            Arg::new("file")
+                .value_parser(clap::value_parser!(std::ffi::OsString))
+                .help("show only these files")
+                .action(clap::ArgAction::Append),
+        )
         .about(HELP_TEXT)
 }
 
@@ -35,7 +44,8 @@
         RelativePaths::Bool(v) => v,
     };
 
-    let rev = invocation.subcommand_args.get_one::<String>("rev");
+    let args = invocation.subcommand_args;
+    let rev = args.get_one::<String>("rev");
 
     let repo = invocation.repo?;
 
@@ -51,11 +61,34 @@
         ));
     }
 
-    let (narrow_matcher, narrow_warnings) = narrow::matcher(repo)?;
+    let (matcher, narrow_warnings) = narrow::matcher(repo)?;
     print_narrow_sparse_warnings(&narrow_warnings, &[], invocation.ui, repo)?;
+    let matcher = match args.get_many::<std::ffi::OsString>("file") {
+        None => matcher,
+        Some(files) => {
+            let patterns: Vec<Vec<u8>> = files
+                .filter(|s| !s.is_empty())
+                .map(get_bytes_from_os_str)
+                .collect();
+            for file in &patterns {
+                if file.starts_with(b"set:") {
+                    return Err(CommandError::unsupported("fileset"));
+                }
+            }
+            let cwd = hg::utils::current_dir()?;
+            let root = repo.working_directory_path();
+            let ignore_patterns = parse_pattern_args(patterns, &cwd, root)?;
+            let files_matcher =
+                hg::matchers::PatternMatcher::new(ignore_patterns)?;
+            Box::new(IntersectionMatcher::new(
+                Box::new(files_matcher),
+                matcher,
+            ))
+        }
+    };
 
     if let Some(rev) = rev {
-        let files = list_rev_tracked_files(repo, rev, narrow_matcher)
+        let files = list_rev_tracked_files(repo, rev, matcher)
             .map_err(|e| (e, rev.as_ref()))?;
         display_files(invocation.ui, repo, relative_paths, files.iter())
     } else {
@@ -63,7 +96,7 @@
         let dirstate = repo.dirstate_map()?;
         let files_res: Result<Vec<_>, _> =
             filter_map_results(dirstate.iter(), |(path, entry)| {
-                Ok(if entry.tracked() && narrow_matcher.matches(path) {
+                Ok(if entry.tracked() && matcher.matches(path) {
                     Some(path)
                 } else {
                     None
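
Both `rhg files` and `rhg status` (below) combine the narrow/sparse matcher with the command-line patterns through an intersection: a path must satisfy both sides. A self-contained sketch with a stand-in `Matcher` trait:

trait Matcher {
    fn matches(&self, path: &str) -> bool;
}

struct PrefixMatcher(&'static str);

impl Matcher for PrefixMatcher {
    fn matches(&self, path: &str) -> bool {
        path.starts_with(self.0)
    }
}

// A path has to satisfy both sides, e.g. the narrow/sparse rules *and*
// the command-line file patterns.
struct IntersectionMatcher(Box<dyn Matcher>, Box<dyn Matcher>);

impl Matcher for IntersectionMatcher {
    fn matches(&self, path: &str) -> bool {
        self.0.matches(path) && self.1.matches(path)
    }
}

fn main() {
    let narrow = Box::new(PrefixMatcher("src/"));
    let files = Box::new(PrefixMatcher("src/main"));
    let both = IntersectionMatcher(narrow, files);
    assert!(both.matches("src/main.rs"));
    assert!(!both.matches("docs/main.rs"));
}
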
--- a/rust/rhg/src/commands/root.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/commands/root.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -20,7 +20,7 @@
         .with_context(|| {
             IoErrorContext::CanonicalizingPath(working_directory.to_owned())
         })?;
-    let bytes = get_bytes_from_path(&working_directory);
+    let bytes = get_bytes_from_path(working_directory);
     invocation
         .ui
         .write_stdout(&format_bytes!(b"{}\n", bytes.as_slice()))?;
--- a/rust/rhg/src/commands/status.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/commands/status.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -7,7 +7,8 @@
 
 use crate::error::CommandError;
 use crate::ui::{
-    format_pattern_file_warning, print_narrow_sparse_warnings, Ui,
+    format_pattern_file_warning, print_narrow_sparse_warnings, relative_paths,
+    RelativePaths, Ui,
 };
 use crate::utils::path_utils::RelativizePaths;
 use clap::Arg;
@@ -17,13 +18,15 @@
 use hg::dirstate::status::StatusPath;
 use hg::dirstate::TruncatedTimestamp;
 use hg::errors::{HgError, IoResultExt};
+use hg::filepatterns::parse_pattern_args;
 use hg::lock::LockError;
 use hg::manifest::Manifest;
 use hg::matchers::{AlwaysMatcher, IntersectionMatcher};
 use hg::repo::Repo;
 use hg::utils::debug::debug_wait_for_file;
-use hg::utils::files::get_bytes_from_os_string;
-use hg::utils::files::get_path_from_bytes;
+use hg::utils::files::{
+    get_bytes_from_os_str, get_bytes_from_os_string, get_path_from_bytes,
+};
 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
 use hg::DirstateStatus;
 use hg::PatternFileWarning;
@@ -48,6 +51,12 @@
         .alias("st")
         .about(HELP_TEXT)
         .arg(
+            Arg::new("file")
+                .value_parser(clap::value_parser!(std::ffi::OsString))
+                .help("show only these files")
+                .action(clap::ArgAction::Append),
+        )
+        .arg(
             Arg::new("all")
                 .help("show status of all files")
                 .short('A')
@@ -360,13 +369,24 @@
                 }
             }
         }
-        let relative_paths = config
+
+        let relative_status = config
             .get_option(b"commands", b"status.relative")?
-            .unwrap_or(config.get_bool(b"ui", b"relative-paths")?);
+            .expect("commands.status.relative should have a default value");
+
+        let relativize_paths = relative_status || {
+            // See the `getuipathfn` usage in `commands.py` in the Python code.
+            let legacy_relative_behavior = args.contains_id("file");
+            match relative_paths(invocation.config)? {
+                RelativePaths::Legacy => legacy_relative_behavior,
+                RelativePaths::Bool(v) => v,
+            }
+        };
+
         let output = DisplayStatusPaths {
             ui,
             no_status,
-            relativize: if relative_paths {
+            relativize: if relativize_paths {
                 Some(RelativizePaths::new(repo)?)
             } else {
                 None
@@ -415,6 +435,29 @@
         (false, true) => sparse_matcher,
         (false, false) => Box::new(AlwaysMatcher),
     };
+    let matcher = match args.get_many::<std::ffi::OsString>("file") {
+        None => matcher,
+        Some(files) => {
+            let patterns: Vec<Vec<u8>> = files
+                .filter(|s| !s.is_empty())
+                .map(get_bytes_from_os_str)
+                .collect();
+            for file in &patterns {
+                if file.starts_with(b"set:") {
+                    return Err(CommandError::unsupported("fileset"));
+                }
+            }
+            let cwd = hg::utils::current_dir()?;
+            let root = repo.working_directory_path();
+            let ignore_patterns = parse_pattern_args(patterns, &cwd, root)?;
+            let files_matcher =
+                hg::matchers::PatternMatcher::new(ignore_patterns)?;
+            Box::new(IntersectionMatcher::new(
+                Box::new(files_matcher),
+                matcher,
+            ))
+        }
+    };
 
     print_narrow_sparse_warnings(
         &narrow_warnings,
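
The status hunk above replaces a single config read with a two-step decision: `commands.status.relative` (which always has a default) forces relative output when set, and otherwise `ui.relative-paths` applies, where the "legacy" value means "relative only when file patterns were given on the command line". A small sketch of that resolution, with illustrative names:

    def should_relativize(status_relative, ui_relative_paths, has_file_args):
        """Decide whether status output is relativized to the cwd.

        status_relative: bool (commands.status.relative, always defaulted)
        ui_relative_paths: True, False, or "legacy" (ui.relative-paths)
        has_file_args: file patterns were passed on the command line
        """
        if status_relative:
            return True
        if ui_relative_paths == "legacy":
            # historical behavior: relative only with explicit patterns
            return has_file_args
        return ui_relative_paths

    assert should_relativize(False, "legacy", has_file_args=True)
    assert not should_relativize(False, "legacy", has_file_args=False)
    assert should_relativize(False, True, has_file_args=False)
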
--- a/rust/rhg/src/error.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/error.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -10,7 +10,8 @@
 use hg::revlog::RevlogError;
 use hg::sparse::SparseConfigError;
 use hg::utils::files::get_bytes_from_path;
-use hg::{DirstateError, DirstateMapError, StatusError};
+use hg::utils::hg_path::HgPathError;
+use hg::{DirstateError, DirstateMapError, PatternError, StatusError};
 use std::convert::From;
 
 /// The kind of command error
@@ -230,6 +231,18 @@
     }
 }
 
+impl From<HgPathError> for CommandError {
+    fn from(error: HgPathError) -> Self {
+        CommandError::unsupported(format!("{}", error))
+    }
+}
+
+impl From<PatternError> for CommandError {
+    fn from(error: PatternError) -> Self {
+        CommandError::unsupported(format!("{}", error))
+    }
+}
+
 impl From<DirstateMapError> for CommandError {
     fn from(error: DirstateMapError) -> Self {
         CommandError::abort(format!("{}", error))
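
The two new `From` impls route `HgPathError` and `PatternError` into `CommandError::unsupported`, while `DirstateMapError` stays a hard abort; in rhg, "unsupported" is what lets the `?` operator trigger a fallback to the Python executable instead of failing outright. A rough Python analogue of that classification, with placeholder exception names:

    class PatternError(Exception):
        pass

    class DirstateMapError(Exception):
        pass

    def classify(exc):
        # pattern/path problems: punt to the full Python implementation;
        # dirstate corruption: abort outright
        if isinstance(exc, PatternError):
            return ("unsupported", str(exc))
        return ("abort", str(exc))

    assert classify(PatternError("bad glob"))[0] == "unsupported"
    assert classify(DirstateMapError("corrupt"))[0] == "abort"
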
--- a/rust/rhg/src/main.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/main.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -76,17 +76,23 @@
 
     // Mercurial allows users to define "defaults" for commands, fallback
     // if a default is detected for the current command
-    let defaults = config.get_str(b"defaults", subcommand_name.as_bytes());
-    if defaults?.is_some() {
-        let msg = "`defaults` config set";
-        return Err(CommandError::unsupported(msg));
+    let defaults = config.get_str(b"defaults", subcommand_name.as_bytes())?;
+    match defaults {
+        // Programmatic usage might set defaults to an empty string to unset
+        // it; allow that
+        None | Some("") => {}
+        Some(_) => {
+            let msg = "`defaults` config set";
+            return Err(CommandError::unsupported(msg));
+        }
     }
 
     for prefix in ["pre", "post", "fail"].iter() {
         // Mercurial allows users to define generic hooks for commands,
         // fallback if any are detected
         let item = format!("{}-{}", prefix, subcommand_name);
-        let hook_for_command = config.get_str(b"hooks", item.as_bytes())?;
+        let hook_for_command =
+            config.get_str_no_default(b"hooks", item.as_bytes())?;
         if hook_for_command.is_some() {
             let msg = format!("{}-{} hook defined", prefix, subcommand_name);
             return Err(CommandError::unsupported(msg));
@@ -349,11 +355,7 @@
             &argv,
             &initial_current_dir,
             &ui,
-            OnUnsupported::Fallback {
-                executable: config
-                    .get(b"rhg", b"fallback-executable")
-                    .map(ToOwned::to_owned),
-            },
+            OnUnsupported::fallback(config),
             Err(CommandError::unsupported(
                 "`rhg.fallback-immediately is true`",
             )),
@@ -402,8 +404,8 @@
     }
 }
 
-fn exit<'a>(
-    original_args: &'a [OsString],
+fn exit(
+    original_args: &[OsString],
     initial_current_dir: &Option<PathBuf>,
     ui: &Ui,
     mut on_unsupported: OnUnsupported,
@@ -662,6 +664,18 @@
 impl OnUnsupported {
     const DEFAULT: Self = OnUnsupported::Abort;
 
+    fn fallback_executable(config: &Config) -> Option<Vec<u8>> {
+        config
+            .get(b"rhg", b"fallback-executable")
+            .map(|x| x.to_owned())
+    }
+
+    fn fallback(config: &Config) -> Self {
+        OnUnsupported::Fallback {
+            executable: Self::fallback_executable(config),
+        }
+    }
+
     fn from_config(config: &Config) -> Self {
         match config
             .get(b"rhg", b"on-unsupported")
@@ -670,11 +684,7 @@
         {
             Some(b"abort") => OnUnsupported::Abort,
             Some(b"abort-silent") => OnUnsupported::AbortSilent,
-            Some(b"fallback") => OnUnsupported::Fallback {
-                executable: config
-                    .get(b"rhg", b"fallback-executable")
-                    .map(|x| x.to_owned()),
-            },
+            Some(b"fallback") => Self::fallback(config),
             None => Self::DEFAULT,
             Some(_) => {
                 // TODO: warn about unknown config value
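
`OnUnsupported::fallback` centralizes construction of the `Fallback` variant so the two call sites above (the `fallback-immediately` path and `from_config`) stay in sync. The policy itself is small; a sketch in Python, assuming a dict-like config:

    def on_unsupported(config):
        """Resolve rhg's behavior when it hits an unsupported feature."""
        value = config.get(("rhg", "on-unsupported"), "abort").strip().lower()
        if value == "abort-silent":
            return ("abort-silent", None)
        if value == "fallback":
            # fallback shells out to the full Python hg, if configured
            return ("fallback", config.get(("rhg", "fallback-executable")))
        # "abort" and unknown values both abort (warning is a TODO upstream)
        return ("abort", None)

    cfg = {("rhg", "on-unsupported"): "fallback",
           ("rhg", "fallback-executable"): "/usr/bin/hg"}
    assert on_unsupported(cfg) == ("fallback", "/usr/bin/hg")
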
--- a/rust/rhg/src/ui.rs	Mon Nov 06 15:38:27 2023 +0100
+++ b/rust/rhg/src/ui.rs	Tue Nov 07 15:21:11 2023 +0100
@@ -251,7 +251,7 @@
         PatternFileWarning::InvalidSyntax(path, syntax) => format_bytes!(
             b"{}: ignoring invalid syntax '{}'\n",
             get_bytes_from_path(path),
-            &*syntax
+            syntax
         ),
         PatternFileWarning::NoSuchFile(path) => {
             let path = if let Ok(relative) =
@@ -259,7 +259,7 @@
             {
                 relative
             } else {
-                &*path
+                path
             };
             format_bytes!(
                 b"skipping unreadable pattern file '{}': \
--- a/setup.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/setup.py	Tue Nov 07 15:21:11 2023 +0100
@@ -1309,6 +1309,7 @@
 
 packages = [
     'mercurial',
+    'mercurial.admin',
     'mercurial.cext',
     'mercurial.cffi',
     'mercurial.defaultrc',
@@ -1322,6 +1323,7 @@
     'mercurial.templates',
     'mercurial.thirdparty',
     'mercurial.thirdparty.attr',
+    'mercurial.thirdparty.tomli',
     'mercurial.thirdparty.zope',
     'mercurial.thirdparty.zope.interface',
     'mercurial.upgrade_utils',
@@ -1336,7 +1338,6 @@
     'hgext.git',
     'hgext.highlight',
     'hgext.hooklib',
-    'hgext.infinitepush',
     'hgext.largefiles',
     'hgext.lfs',
     'hgext.narrow',
@@ -1659,6 +1660,7 @@
 
 packagedata = {
     'mercurial': [
+        'configitems.toml',
         'locale/*/LC_MESSAGES/hg.mo',
         'dummycert.pem',
     ],
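
setup.py now ships `mercurial/configitems.toml` as package data and vendors `mercurial.thirdparty.tomli` to parse it, moving config item declarations out of Python code. A hedged sketch of reading such a file with the stdlib `tomllib` (API-compatible with tomli); the `[[items]]` schema here is assumed for illustration and may differ from the real file:

    import io
    import tomllib  # Python 3.11+; the vendored tomli offers the same API

    SAMPLE = b"""
    [[items]]
    section = "ui"
    name = "relative-paths"
    default = "legacy"
    """

    items = tomllib.load(io.BytesIO(SAMPLE))["items"]
    registry = {(i["section"], i["name"]): i.get("default") for i in items}
    assert registry[("ui", "relative-paths")] == "legacy"
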
--- a/tests/bundles/test-revlog-diff-relative-to-nullrev.sh	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/bundles/test-revlog-diff-relative-to-nullrev.sh	Tue Nov 07 15:21:11 2023 +0100
@@ -12,7 +12,7 @@
 #
 #          if deltainfo is None:
 # -            deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
-# +            if revlog._generaldelta:
+# +            if revlog.delta_config.general_delta:
 # +                deltainfo = self._builddeltainfo(revinfo, nullrev, fh)
 # +            else:
 # +                deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
@@ -32,7 +32,7 @@
 hg up null
 echo ha > a
 ../../../hg commit -Am root-A
-../../../hg debugdeltachain a
+../../../hg debugdeltachain a --all-info
 rm -rf .hg/cache/ .hg/wcache/
 cd ..
 
--- a/tests/library-infinitepush.sh	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-scratchnodes() {
-  for node in `find ../repo/.hg/scratchbranches/index/nodemap/* | sort`; do
-     echo ${node##*/} `cat $node`
-  done
-}
-
-scratchbookmarks() {
-  for bookmark in `find ../repo/.hg/scratchbranches/index/bookmarkmap/* -type f | sort`; do
-     echo "${bookmark##*/bookmarkmap/} `cat $bookmark`"
-  done
-}
-
-setupcommon() {
-  cat >> $HGRCPATH << EOF
-[extensions]
-infinitepush=
-[infinitepush]
-branchpattern=re:scratch/.*
-deprecation-abort=no
-deprecation-message=yes
-
-EOF
-}
-
-setupserver() {
-cat >> .hg/hgrc << EOF
-[infinitepush]
-server=yes
-indextype=disk
-storetype=disk
-reponame=babar
-EOF
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-admin-commands.py	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,399 @@
+# Test admin commands
+
+import functools
+import unittest
+from mercurial.i18n import _
+from mercurial import error, ui as uimod
+from mercurial import registrar
+from mercurial.admin import verify
+
+
+class TestAdminVerifyFindChecks(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.ui = uimod.ui.load()
+        self.repo = b"fake-repo"
+
+        def cleanup_table(self):
+            self.table = {}
+            self.alias_table = {}
+            self.pyramid = {}
+
+        self.addCleanup(cleanup_table, self)
+
+    def setUp(self):
+        self.table = {}
+        self.alias_table = {}
+        self.pyramid = {}
+        check = registrar.verify_check(self.table, self.alias_table)
+
+        # mock some fake check methods for test purposes
+        @check(
+            b"test.dummy",
+            alias=b"dummy",
+            options=[],
+        )
+        def check_dummy(ui, repo, **options):
+            return options
+
+        @check(
+            b"test.fake",
+            alias=b"fake",
+            options=[
+                (b'a', False, _(b'a boolean value (default: False)')),
+                (b'b', True, _(b'a boolean value (default: True)')),
+                (b'c', [], _(b'a list')),
+            ],
+        )
+        def check_fake(ui, repo, **options):
+            return options
+
+        # alias in the middle of a hierarchy
+        check(
+            b"test.noop",
+            alias=b"noop",
+            options=[],
+        )(verify.noop_func)
+
+        @check(
+            b"test.noop.deeper",
+            alias=b"deeper",
+            options=[
+                (b'y', True, _(b'a boolean value (default: True)')),
+                (b'z', [], _(b'a list')),
+            ],
+        )
+        def check_noop_deeper(ui, repo, **options):
+            return options
+
+    # args wrapper utilities
+    def find_checks(self, name):
+        return verify.find_checks(
+            name=name,
+            table=self.table,
+            alias_table=self.alias_table,
+            full_pyramid=self.pyramid,
+        )
+
+    def pass_options(self, checks, options):
+        return verify.pass_options(
+            self.ui,
+            checks,
+            options,
+            table=self.table,
+            alias_table=self.alias_table,
+            full_pyramid=self.pyramid,
+        )
+
+    def get_checks(self, names, options):
+        return verify.get_checks(
+            self.repo,
+            self.ui,
+            names=names,
+            options=options,
+            table=self.table,
+            alias_table=self.alias_table,
+            full_pyramid=self.pyramid,
+        )
+
+    # tests find_checks
+    def test_find_checks_empty_name(self):
+        with self.assertRaises(error.InputError):
+            self.find_checks(name=b"")
+
+    def test_find_checks_wrong_name(self):
+        with self.assertRaises(error.InputError):
+            self.find_checks(name=b"unknown")
+
+    def test_find_checks_dummy(self):
+        name = b"test.dummy"
+        found = self.find_checks(name=name)
+        self.assertEqual(len(found), 1)
+        self.assertIn(name, found)
+        meth = found[name]
+        self.assertTrue(callable(meth))
+        self.assertEqual(len(meth.options), 0)
+
+    def test_find_checks_fake(self):
+        name = b"test.fake"
+        found = self.find_checks(name=name)
+        self.assertEqual(len(found), 1)
+        self.assertIn(name, found)
+        meth = found[name]
+        self.assertTrue(callable(meth))
+        self.assertEqual(len(meth.options), 3)
+
+    def test_find_checks_noop(self):
+        name = b"test.noop.deeper"
+        found = self.find_checks(name=name)
+        self.assertEqual(len(found), 1)
+        self.assertIn(name, found)
+        meth = found[name]
+        self.assertTrue(callable(meth))
+        self.assertEqual(len(meth.options), 2)
+
+    def test_find_checks_from_aliases(self):
+        found = self.find_checks(name=b"dummy")
+        self.assertEqual(len(found), 1)
+        self.assertIn(b"test.dummy", found)
+
+        found = self.find_checks(name=b"fake")
+        self.assertEqual(len(found), 1)
+        self.assertIn(b"test.fake", found)
+
+        found = self.find_checks(name=b"deeper")
+        self.assertEqual(len(found), 1)
+        self.assertIn(b"test.noop.deeper", found)
+
+    def test_find_checks_from_root(self):
+        found = self.find_checks(name=b"test")
+        self.assertEqual(len(found), 3)
+        self.assertIn(b"test.dummy", found)
+        self.assertIn(b"test.fake", found)
+        self.assertIn(b"test.noop.deeper", found)
+
+    def test_find_checks_from_intermediate(self):
+        found = self.find_checks(name=b"test.noop")
+        self.assertEqual(len(found), 1)
+        self.assertIn(b"test.noop.deeper", found)
+
+    def test_find_checks_from_parent_dot_name(self):
+        found = self.find_checks(name=b"noop.deeper")
+        self.assertEqual(len(found), 1)
+        self.assertIn(b"test.noop.deeper", found)
+
+    # tests pass_options
+    def test_pass_options_no_checks_no_options(self):
+        checks = {}
+        options = []
+
+        with self.assertRaises(error.Error):
+            self.pass_options(checks=checks, options=options)
+
+    def test_pass_options_fake_empty_options(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = []
+        # should end up with the default options
+        expected_options = {"a": False, "b": True, "c": []}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+    def test_pass_options_fake_non_existing_options(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+
+        with self.assertRaises(error.InputError):
+            options = [b"test.fake:boom=yes"]
+            self.pass_options(checks=funcs, options=options)
+
+    def test_pass_options_fake_unrelated_options(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = [b"test.noop.deeper:y=yes"]
+
+        with self.assertRaises(error.InputError):
+            self.pass_options(checks=funcs, options=options)
+
+    def test_pass_options_fake_set_option(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = [b"test.fake:a=yes"]
+        expected_options = {"a": True, "b": True, "c": []}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+    def test_pass_options_fake_set_option_with_alias(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = [b"fake:a=yes"]
+        expected_options = {"a": True, "b": True, "c": []}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+    def test_pass_options_fake_set_all_option(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = [b"test.fake:a=yes", b"test.fake:b=no", b"test.fake:c=0,1,2"]
+        expected_options = {"a": True, "b": False, "c": [b"0", b"1", b"2"]}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+    def test_pass_options_fake_set_all_option_plus_unexisting(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = [
+            b"test.fake:a=yes",
+            b"test.fake:b=no",
+            b"test.fake:c=0,1,2",
+            b"test.fake:d=0",
+        ]
+
+        with self.assertRaises(error.InputError):
+            self.pass_options(checks=funcs, options=options)
+
+    def test_pass_options_fake_duplicate_option(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = [
+            b"test.fake:a=yes",
+            b"test.fake:a=no",
+        ]
+
+        with self.assertRaises(error.InputError):
+            self.pass_options(checks=funcs, options=options)
+
+    def test_pass_options_fake_set_malformed_option(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        options = [
+            b"test.fake:ayes",
+            b"test.fake:b==no",
+            b"test.fake=",
+            b"test.fake:",
+            b"test.fa=ke:d=0",
+            b"test.fa=ke:d=0",
+        ]
+
+        for opt in options:
+            with self.assertRaises(error.InputError):
+                self.pass_options(checks=funcs, options=[opt])
+
+    def test_pass_options_types(self):
+        checks = self.find_checks(name=b"test.fake")
+        funcs = {
+            n: functools.partial(f, self.ui, self.repo)
+            for n, f in checks.items()
+        }
+        # boolean, yes/no
+        options = [b"test.fake:a=yes", b"test.fake:b=no"]
+        expected_options = {"a": True, "b": False, "c": []}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+        # boolean, 0/1
+        options = [b"test.fake:a=1", b"test.fake:b=0"]
+        expected_options = {"a": True, "b": False, "c": []}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+        # boolean, true/false
+        options = [b"test.fake:a=true", b"test.fake:b=false"]
+        expected_options = {"a": True, "b": False, "c": []}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+        # boolean, wrong type
+        options = [b"test.fake:a=si"]
+        with self.assertRaises(error.InputError):
+            self.pass_options(checks=funcs, options=options)
+
+        # lists
+        options = [b"test.fake:c=0,1,2"]
+        expected_options = {"a": False, "b": True, "c": [b"0", b"1", b"2"]}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+        options = [b"test.fake:c=x,y,z"]
+        expected_options = {"a": False, "b": True, "c": [b"x", b"y", b"z"]}
+        func = self.pass_options(checks=funcs, options=options)
+
+        self.assertDictEqual(func[b"test.fake"].keywords, expected_options)
+
+    # tests get_checks
+    def test_get_checks_fake(self):
+        funcs = self.get_checks(
+            names=[b"test.fake"], options=[b"test.fake:a=yes"]
+        )
+        options = funcs.get(b"test.fake").keywords
+        expected_options = {"a": True, "b": True, "c": []}
+        self.assertDictEqual(options, expected_options)
+
+    def test_get_checks_multiple_mixed_with_defaults(self):
+        funcs = self.get_checks(
+            names=[b"test.fake", b"test.noop.deeper", b"test.dummy"],
+            options=[
+                b"test.noop.deeper:y=no",
+                b"test.noop.deeper:z=-1,0,1",
+            ],
+        )
+        options = funcs.get(b"test.fake").keywords
+        expected_options = {"a": False, "b": True, "c": []}
+        self.assertDictEqual(options, expected_options)
+
+        options = funcs.get(b"test.noop.deeper").keywords
+        expected_options = {"y": False, "z": [b"-1", b"0", b"1"]}
+        self.assertDictEqual(options, expected_options)
+
+        options = funcs.get(b"test.dummy").keywords
+        expected_options = {}
+        self.assertDictEqual(options, expected_options)
+
+    def test_broken_pyramid(self):
+        """Check that we detect pyramids that can't resolve"""
+        table = {}
+        alias_table = {}
+        pyramid = {}
+        check = registrar.verify_check(table, alias_table)
+
+        # Create two checks that clash
+        @check(b"test.wrong.intermediate")
+        def check_dummy(ui, repo, **options):
+            return options
+
+        @check(b"test.wrong.intermediate.thing")
+        def check_fake(ui, repo, **options):
+            return options
+
+        with self.assertRaises(error.ProgrammingError) as e:
+            verify.get_checks(
+                self.repo,
+                self.ui,
+                names=[b"test.wrong.intermediate"],
+                options=[],
+                table=table,
+                alias_table=alias_table,
+                full_pyramid=pyramid,
+            )
+        assert "`verify.noop_func`" in str(e.exception), str(e.exception)
+
+
+if __name__ == '__main__':
+    import silenttestrunner
+
+    silenttestrunner.main(__name__)
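
The tests above exercise `verify.find_checks`, which resolves a check name or alias to every registered leaf check at or below it in the dotted hierarchy (and `get_checks` rejects "broken pyramids", where a real check sits above another). A self-contained sketch of the lookup idea, not the real implementation (it skips, for example, parent-dot-name matching):

    def find_checks(name, table, alias_table):
        """Return every registered check named `name` or nested under it."""
        if not name:
            raise ValueError("no check name given")
        name = alias_table.get(name, name)
        found = {n: f for n, f in table.items()
                 if n == name or n.startswith(name + ".")}
        if not found:
            raise LookupError("unknown check %s" % name)
        return found

    table = {"test.dummy": object(), "test.fake": object(),
             "test.noop.deeper": object()}
    aliases = {"dummy": "test.dummy", "deeper": "test.noop.deeper"}
    assert set(find_checks("test", table, aliases)) == set(table)
    assert list(find_checks("deeper", table, aliases)) == ["test.noop.deeper"]
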
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-admin-commands.t	Tue Nov 07 15:21:11 2023 +0100
@@ -0,0 +1,49 @@
+Test admin::verify
+
+  $ hg init admin-verify
+  $ cd admin-verify
+
+Test normal output
+
+  $ hg admin::verify -c dirstate
+  running 1 checks
+  running working-copy.dirstate
+  checking dirstate
+
+Quiet works
+
+  $ hg admin::verify -c dirstate --quiet
+
+Test no check no options
+
+  $ hg admin::verify
+  abort: `checks` required
+  [255]
+
+Test single check without options
+
+  $ hg admin::verify -c working-copy.dirstate
+  running 1 checks
+  running working-copy.dirstate
+  checking dirstate
+
+Test single check (alias) without options
+
+  $ hg admin::verify -c dirstate
+  running 1 checks
+  running working-copy.dirstate
+  checking dirstate
+
+Test wrong check name without options
+
+  $ hg admin::verify -c working-copy.dir
+  abort: unknown check working-copy.dir
+  (did you mean working-copy.dirstate?)
+  [10]
+
+Test wrong alias without options
+
+  $ hg admin::verify -c dir
+  abort: unknown check dir
+  [10]
+
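
Options reach a check as `check-name:option=value`, with booleans accepted as yes/no, 0/1, or true/false and list options split on commas; malformed, duplicate, or unknown settings abort with an input error, as the unit tests earlier show. A sketch of parsing one such spec against a check's declared defaults (names illustrative):

    _BOOLS = {"yes": True, "1": True, "true": True,
              "no": False, "0": False, "false": False}

    def parse_check_option(spec, defaults):
        """Parse one 'check:opt=value' spec; `defaults` gives the types."""
        check, sep, assignment = spec.partition(":")
        if not sep or "=" in check or "=" not in assignment:
            raise ValueError("malformed option %s" % spec)
        opt, _, raw = assignment.partition("=")
        if opt not in defaults:
            raise ValueError("unknown option %s" % opt)
        if isinstance(defaults[opt], bool):
            if raw not in _BOOLS:
                raise ValueError("invalid boolean %r" % raw)
            return check, opt, _BOOLS[raw]
        if isinstance(defaults[opt], list):
            return check, opt, raw.split(",")
        return check, opt, raw

    defaults = {"a": False, "b": True, "c": []}
    assert parse_check_option("test.fake:a=yes", defaults)[2] is True
    assert parse_check_option("test.fake:c=0,1,2", defaults)[2] == ["0", "1", "2"]
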
--- a/tests/test-alias.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-alias.t	Tue Nov 07 15:21:11 2023 +0100
@@ -68,17 +68,17 @@
 help
 
   $ hg help -c | grep myinit
-   myinit       This is my documented alias for init.
+   myinit        This is my documented alias for init.
   $ hg help -c | grep mycommit
-   mycommit     This is my alias with only doc.
+   mycommit      This is my alias with only doc.
   $ hg help -c | grep cleanstatus
   [1]
   $ hg help -c | grep lognull
-   lognull      Logs the null rev
+   lognull       Logs the null rev
   $ hg help -c | grep dln
   [1]
   $ hg help -c | grep recursivedoc
-   recursivedoc Logs the null rev in debug mode
+   recursivedoc  Logs the null rev in debug mode
   $ hg help myinit
   hg myinit [OPTIONS] [BLA] [BLE]
   
@@ -603,7 +603,7 @@
 help for a shell alias
 
   $ hg help -c | grep rebate
-   rebate       This is my alias which just prints something.
+   rebate        This is my alias which just prints something.
   $ hg help rebate
   hg rebate [MYARGS]
   
--- a/tests/test-ancestor.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-ancestor.py	Tue Nov 07 15:21:11 2023 +0100
@@ -12,7 +12,6 @@
     debugcommands,
     hg,
     ui as uimod,
-    util,
 )
 
 
@@ -416,7 +415,7 @@
     for i, (dag, tests) in enumerate(dagtests):
         repo = hg.repository(u, b'gca%d' % i, create=1)
         cl = repo.changelog
-        if not util.safehasattr(cl.index, 'ancestors'):
+        if not hasattr(cl.index, 'ancestors'):
             # C version not available
             return
 
--- a/tests/test-bundle2-exchange.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-bundle2-exchange.t	Tue Nov 07 15:21:11 2023 +0100
@@ -917,7 +917,7 @@
   >         raise error.Abort(b"Lock should not be taken")
   >     return orig(repo, *args, **kwargs)
   > def extsetup(ui):
-  >    extensions.wrapfunction(bundle2, b'processbundle', checklock)
+  >    extensions.wrapfunction(bundle2, 'processbundle', checklock)
   > EOF
 
   $ hg init lazylock
@@ -1042,8 +1042,6 @@
   adding changesets
   remote: abort: incompatible Mercurial client; bundle2 required
   remote: (see https://www.mercurial-scm.org/wiki/IncompatibleClient)
-  transaction abort!
-  rollback completed
   abort: stream ended unexpectedly (got 0 bytes, expected 4)
   [255]
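
Several tests in this changeset switch `extensions.wrapfunction`'s second argument from bytes to a native str, matching the new signature. A minimal extension skeleton modeled on the snippet above, with the wrapper body elided to a pass-through:

    from mercurial import bundle2, extensions

    def checklock(orig, repo, *args, **kwargs):
        # inspect or guard here, then delegate to the wrapped function
        return orig(repo, *args, **kwargs)

    def extsetup(ui):
        # the attribute name is now a str, not bytes
        extensions.wrapfunction(bundle2, 'processbundle', checklock)
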
 
--- a/tests/test-byteify-strings.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-byteify-strings.t	Tue Nov 07 15:21:11 2023 +0100
@@ -110,19 +110,6 @@
   > def f():
   >     pass
   > EOF
-  $ byteify_strings testfile.py --allow-attr-methods
-  setattr(o, 'a', 1)
-  util.setattr(o, 'ae', 1)
-  util.getattr(o, 'alksjdf', b'default')
-  util.addattr(o, 'asdf')
-  util.hasattr(o, 'lksjdf', b'default')
-  util.safehasattr(o, 'lksjdf', b'default')
-  @eh.wrapfunction(func, 'lksjdf')
-  def f():
-      pass
-  @eh.wrapclass(klass, 'lksjdf')
-  def f():
-      pass
 
 Test without attr*() as methods
 
@@ -142,15 +129,15 @@
   > EOF
   $ byteify_strings testfile.py
   setattr(o, 'a', 1)
-  util.setattr(o, b'ae', 1)
-  util.getattr(o, b'alksjdf', b'default')
-  util.addattr(o, b'asdf')
-  util.hasattr(o, b'lksjdf', b'default')
-  util.safehasattr(o, b'lksjdf', b'default')
-  @eh.wrapfunction(func, b'lksjdf')
+  util.setattr(o, 'ae', 1)
+  util.getattr(o, 'alksjdf', b'default')
+  util.addattr(o, 'asdf')
+  util.hasattr(o, 'lksjdf', b'default')
+  util.safehasattr(o, 'lksjdf', b'default')
+  @eh.wrapfunction(func, 'lksjdf')
   def f():
       pass
-  @eh.wrapclass(klass, b'lksjdf')
+  @eh.wrapclass(klass, 'lksjdf')
   def f():
       pass
 
--- a/tests/test-check-execute.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-check-execute.t	Tue Nov 07 15:21:11 2023 +0100
@@ -20,5 +20,5 @@
 
 look for non scripts with no shebang
 
-  $ testrepohg files 'set:exec() and not **.sh and not **.py and not grep(r"^#!")'
+  $ testrepohg files 'set:exec() and not **.sh and not **.py and not grep(r"^#!") and not contrib/openvms/**'
   [1]
--- a/tests/test-check-py3-compat.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-check-py3-compat.t	Tue Nov 07 15:21:11 2023 +0100
@@ -10,7 +10,6 @@
   > | sed 's|\\|/|g' | xargs "$PYTHON" contrib/check-py3-compat.py \
   > | sed 's/[0-9][0-9]*)$/*)/'
   hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob) (?)
-  hgext/infinitepush/sqlindexapi.py: error importing: <*Error> No module named 'mysql' (error at sqlindexapi.py:*) (glob) (?)
   mercurial/scmwindows.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
   mercurial/win32.py: error importing: <ValueError> _type_ 'v' not supported (error at win32.py:*) (no-windows !)
   mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob) (no-windows !)
--- a/tests/test-commandserver.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-commandserver.t	Tue Nov 07 15:21:11 2023 +0100
@@ -923,7 +923,7 @@
   >         raise Exception('crash')
   >     return orig(ui, repo, conn, createcmdserver, prereposetups)
   > def extsetup(ui):
-  >     extensions.wrapfunction(commandserver, b'_serverequest', _serverequest)
+  >     extensions.wrapfunction(commandserver, '_serverequest', _serverequest)
   > EOF
   $ cat <<EOF >> .hg/hgrc
   > [extensions]
--- a/tests/test-completion.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-completion.t	Tue Nov 07 15:21:11 2023 +0100
@@ -3,6 +3,7 @@
   abort
   add
   addremove
+  admin::verify
   annotate
   archive
   backout
@@ -65,6 +66,7 @@
   abort
   add
   addremove
+  admin::verify
   annotate
   archive
 
@@ -257,6 +259,7 @@
   abort: dry-run
   add: include, exclude, subrepos, dry-run
   addremove: similarity, subrepos, include, exclude, dry-run
+  admin::verify: check, option
   annotate: rev, follow, no-follow, text, user, file, date, number, changeset, line-number, skip, ignore-all-space, ignore-space-change, ignore-blank-lines, ignore-space-at-eol, include, exclude, template
   archive: no-decode, prefix, rev, type, subrepos, include, exclude
   backout: merge, commit, no-commit, parent, rev, edit, tool, include, exclude, message, logfile, date, user
@@ -293,7 +296,7 @@
   debugdag: tags, branches, dots, spaces
   debugdata: changelog, manifest, dir
   debugdate: extended
-  debugdeltachain: changelog, manifest, dir, template
+  debugdeltachain: rev, all-info, size-info, dist-info, sparse-info, changelog, manifest, dir, template
   debugdirstateignorepatternshash: 
   debugdirstate: nodates, dates, datesort, docket, all
   debugdiscovery: old, nonheads, rev, seed, local-as-revs, remote-as-revs, ssh, remotecmd, insecure, template
--- a/tests/test-contrib-perf.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-contrib-perf.t	Tue Nov 07 15:21:11 2023 +0100
@@ -301,23 +301,38 @@
 
   $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
   ! wall * comb * user * sys * (best of 15) (glob)
+  ! wall * comb * user * sys * (max of 15) (glob)
+  ! wall * comb * user * sys * (avg of 15) (glob)
+  ! wall * comb * user * sys * (median of 15) (glob)
 
 Multiple entries
 
   $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
   ! wall * comb * user * sys * (best of 50) (glob)
+  ! wall * comb * user * sys 0.000000 (max of 50) (glob)
+  ! wall * comb * user * sys 0.000000 (avg of 50) (glob)
+  ! wall * comb * user * sys 0.000000 (median of 50) (glob)
 
 error case are ignored
 
   $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
   malformatted run limit entry, missing "-": 500
   ! wall * comb * user * sys * (best of 50) (glob)
+  ! wall * comb * user * sys * (max of 50) (glob)
+  ! wall * comb * user * sys * (avg of 50) (glob)
+  ! wall * comb * user * sys * (median of 50) (glob)
   $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
   malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
   ! wall * comb * user * sys * (best of 50) (glob)
+  ! wall * comb * user * sys * (max of 50) (glob)
+  ! wall * comb * user * sys * (avg of 50) (glob)
+  ! wall * comb * user * sys * (median of 50) (glob)
   $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
   malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
   ! wall * comb * user * sys * (best of 50) (glob)
+  ! wall * comb * user * sys * (max of 50) (glob)
+  ! wall * comb * user * sys * (avg of 50) (glob)
+  ! wall * comb * user * sys * (median of 50) (glob)
 
 test actual output
 ------------------
@@ -326,6 +341,9 @@
 
   $ hg perfheads --config perf.stub=no
   ! wall * comb * user * sys * (best of *) (glob)
+  ! wall * comb * user * sys * (max of *) (glob)
+  ! wall * comb * user * sys * (avg of *) (glob)
+  ! wall * comb * user * sys * (median of *) (glob)
 
 detailed output:
 
@@ -343,8 +361,23 @@
   $ hg perfheads --template json --config perf.stub=no
   [
    {
+    "avg.comb": *, (glob)
+    "avg.count": *, (glob)
+    "avg.sys": *, (glob)
+    "avg.user": *, (glob)
+    "avg.wall": *, (glob)
     "comb": *, (glob)
     "count": *, (glob)
+    "max.comb": *, (glob)
+    "max.count": *, (glob)
+    "max.sys": *, (glob)
+    "max.user": *, (glob)
+    "max.wall": *, (glob)
+    "median.comb": *, (glob)
+    "median.count": *, (glob)
+    "median.sys": *, (glob)
+    "median.user": *, (glob)
+    "median.wall": *, (glob)
     "sys": *, (glob)
     "user": *, (glob)
     "wall": * (glob)
@@ -386,13 +419,22 @@
 
   $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
   ! wall * comb * user * sys * (best of 1) (glob)
+  ! wall * comb * user * sys * (max of 1) (glob)
+  ! wall * comb * user * sys * (avg of 1) (glob)
+  ! wall * comb * user * sys * (median of 1) (glob)
   searching for changes
   $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
   ! wall * comb * user * sys * (best of 1) (glob)
+  ! wall * comb * user * sys * (max of 1) (glob)
+  ! wall * comb * user * sys * (avg of 1) (glob)
+  ! wall * comb * user * sys * (median of 1) (glob)
   searching for changes
   searching for changes
   $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
   ! wall * comb * user * sys * (best of 1) (glob)
+  ! wall * comb * user * sys * (max of 1) (glob)
+  ! wall * comb * user * sys * (avg of 1) (glob)
+  ! wall * comb * user * sys * (median of 1) (glob)
   searching for changes
   searching for changes
   searching for changes
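
contrib/perf now prints max, avg, and median lines next to the historical best-of line. A sketch of deriving those summaries from per-run (wall, comb, user, sys) samples; how perf actually aggregates (per-field averages vs. whole runs keyed on wall time) is an assumption here:

    def summarize(samples):
        """samples: one (wall, comb, user, sys) tuple per timed run."""
        by_wall = sorted(samples, key=lambda s: s[0])
        count = len(samples)
        return {
            "best": by_wall[0],                    # fastest run
            "max": by_wall[-1],                    # slowest run
            "avg": tuple(sum(f) / count for f in zip(*samples)),
            "median": by_wall[count // 2],
        }

    runs = [(0.010, 0.012, 0.008, 0.004),
            (0.014, 0.016, 0.010, 0.006),
            (0.011, 0.013, 0.009, 0.004)]
    assert summarize(runs)["best"] == (0.010, 0.012, 0.008, 0.004)
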
--- a/tests/test-debugcommands.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-debugcommands.t	Tue Nov 07 15:21:11 2023 +0100
@@ -205,7 +205,7 @@
 #endif
 
 #if reporevlogstore no-pure
-  $ hg debugdeltachain -m
+  $ hg debugdeltachain -m --all-info
       rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio   readsize largestblk rddensity srchunks
         0      -1      -1       1        1       -1    base         44         43         44   1.02326        44         0    0.00000         44         44   1.00000        1
         1       0      -1       2        1       -1    base          0          0          0   0.00000         0         0    0.00000          0          0   1.00000        1
@@ -216,7 +216,50 @@
   1 2 1
   2 3 1
 
-  $ hg debugdeltachain -m -Tjson
+  $ hg debugdeltachain -m -Tjson  --size-info
+  [
+   {
+    "chainid": 1,
+    "chainlen": 1,
+    "chainratio": 1.0232558139534884,
+    "chainsize": 44,
+    "compsize": 44,
+    "deltatype": "base",
+    "p1": -1,
+    "p2": -1,
+    "prevrev": -1,
+    "rev": 0,
+    "uncompsize": 43
+   },
+   {
+    "chainid": 2,
+    "chainlen": 1,
+    "chainratio": 0,
+    "chainsize": 0,
+    "compsize": 0,
+    "deltatype": "base",
+    "p1": 0,
+    "p2": -1,
+    "prevrev": -1,
+    "rev": 1,
+    "uncompsize": 0
+   },
+   {
+    "chainid": 3,
+    "chainlen": 1,
+    "chainratio": 1.0232558139534884,
+    "chainsize": 44,
+    "compsize": 44,
+    "deltatype": "base",
+    "p1": 1,
+    "p2": -1,
+    "prevrev": -1,
+    "rev": 2,
+    "uncompsize": 43
+   }
+  ]
+
+  $ hg debugdeltachain -m -Tjson  --all-info
   [
    {
     "chainid": 1,
@@ -286,18 +329,61 @@
   > [experimental]
   > sparse-read = True
   > EOF
-  $ hg debugdeltachain -m
+  $ hg debugdeltachain -m --all-info
       rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio   readsize largestblk rddensity srchunks
         0      -1      -1       1        1       -1    base         44         43         44   1.02326        44         0    0.00000         44         44   1.00000        1
         1       0      -1       2        1       -1    base          0          0          0   0.00000         0         0    0.00000          0          0   1.00000        1
         2       1      -1       3        1       -1    base         44         43         44   1.02326        44         0    0.00000         44         44   1.00000        1
 
-  $ hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {readsize} {largestblock} {readdensity}\n'
+  $ hg debugdeltachain -m --sparse-info -T '{rev} {chainid} {chainlen} {readsize} {largestblock} {readdensity}\n'
   0 1 1 44 44 1.0
   1 2 1 0 0 1
   2 3 1 44 44 1.0
 
-  $ hg debugdeltachain -m -Tjson
+  $ hg debugdeltachain -m -Tjson --sparse-info
+  [
+   {
+    "chainid": 1,
+    "chainlen": 1,
+    "deltatype": "base",
+    "largestblock": 44,
+    "p1": -1,
+    "p2": -1,
+    "prevrev": -1,
+    "readdensity": 1.0,
+    "readsize": 44,
+    "rev": 0,
+    "srchunks": 1
+   },
+   {
+    "chainid": 2,
+    "chainlen": 1,
+    "deltatype": "base",
+    "largestblock": 0,
+    "p1": 0,
+    "p2": -1,
+    "prevrev": -1,
+    "readdensity": 1,
+    "readsize": 0,
+    "rev": 1,
+    "srchunks": 1
+   },
+   {
+    "chainid": 3,
+    "chainlen": 1,
+    "deltatype": "base",
+    "largestblock": 44,
+    "p1": 1,
+    "p2": -1,
+    "prevrev": -1,
+    "readdensity": 1.0,
+    "readsize": 44,
+    "rev": 2,
+    "srchunks": 1
+   }
+  ]
+
+  $ hg debugdeltachain -m -Tjson --all-info
   [
    {
     "chainid": 1,
@@ -374,7 +460,7 @@
   >   hg ci -m "a default:$i"
   >   hg up -q other
   > done
-  $ hg debugdeltachain a -T '{rev} {srchunks}\n' \
+  $ hg debugdeltachain a -T '{rev} {srchunks}\n' --all-info \
   >    --config experimental.sparse-read.density-threshold=0.50 \
   >    --config experimental.sparse-read.min-gap-size=0
   0 1
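
With this changeset `debugdeltachain` prints only the basic columns by default and gates the rest behind `--size-info`, `--dist-info`, and `--sparse-info`, with `--all-info` enabling everything; the JSON runs above show which keys each group contributes. A sketch of that column selection, with the group contents inferred from the test output:

    BASIC = ["rev", "p1", "p2", "chainid", "chainlen", "prevrev", "deltatype"]
    GROUPS = {
        "size-info": ["compsize", "uncompsize", "chainsize", "chainratio"],
        "dist-info": ["lindist", "extradist", "extraratio"],
        "sparse-info": ["readsize", "largestblock", "readdensity", "srchunks"],
    }

    def columns(size_info=False, dist_info=False, sparse_info=False,
                all_info=False):
        cols = list(BASIC)
        for flag, group in [(size_info, "size-info"),
                            (dist_info, "dist-info"),
                            (sparse_info, "sparse-info")]:
            if flag or all_info:
                cols += GROUPS[group]
        return cols

    assert "chainsize" in columns(all_info=True)
    assert "readsize" not in columns(size_info=True)
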
--- a/tests/test-demandimport.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-demandimport.py	Tue Nov 07 15:21:11 2023 +0100
@@ -179,15 +179,13 @@
         'cannot import name unknownattr'
     )
 
-from mercurial import util
-
 # Unlike the import statement, __import__() function should not raise
 # ImportError even if fromlist has an unknown item
 # (see Python/import.c:import_module_level() and ensure_fromlist())
 assert 'ftplib' not in sys.modules
 zipfileimp = __import__('ftplib', globals(), locals(), ['unknownattr'])
 assert f(zipfileimp) == "<module 'ftplib' from '?'>", f(zipfileimp)
-assert not util.safehasattr(zipfileimp, 'unknownattr')
+assert not hasattr(zipfileimp, 'unknownattr')
 
 
 # test deactivation for issue6725
--- a/tests/test-devel-warnings.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-devel-warnings.t	Tue Nov 07 15:21:11 2023 +0100
@@ -455,10 +455,10 @@
   > EOF
 
   $ hg --config "extensions.buggyconfig=${TESTTMP}/buggyconfig.py" buggyconfig
-  devel-warn: extension 'buggyconfig' overwrite config item 'ui.interactive' at: */mercurial/extensions.py:* (_loadextra) (glob) (no-pyoxidizer !)
-  devel-warn: extension 'buggyconfig' overwrite config item 'ui.quiet' at: */mercurial/extensions.py:* (_loadextra) (glob) (no-pyoxidizer !)
-  devel-warn: extension 'buggyconfig' overwrite config item 'ui.interactive' at: mercurial.extensions:* (_loadextra) (glob) (pyoxidizer !)
-  devel-warn: extension 'buggyconfig' overwrite config item 'ui.quiet' at: mercurial.extensions:* (_loadextra) (glob) (pyoxidizer !)
+  devel-warn: extension 'buggyconfig' overwrites config item 'ui.interactive' at: */mercurial/extensions.py:* (_loadextra) (glob) (no-pyoxidizer !)
+  devel-warn: extension 'buggyconfig' overwrites config item 'ui.quiet' at: */mercurial/extensions.py:* (_loadextra) (glob) (no-pyoxidizer !)
+  devel-warn: extension 'buggyconfig' overwrites config item 'ui.interactive' at: mercurial.extensions:* (_loadextra) (glob) (pyoxidizer !)
+  devel-warn: extension 'buggyconfig' overwrites config item 'ui.quiet' at: mercurial.extensions:* (_loadextra) (glob) (pyoxidizer !)
   devel-warn: specifying a mismatched default value for a registered config item: 'ui.quiet' 'True' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
   devel-warn: specifying a mismatched default value for a registered config item: 'ui.interactive' 'False' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
   devel-warn: specifying a mismatched default value for a registered config item: 'test.some' 'bar' at: $TESTTMP/buggyconfig.py:* (cmdbuggyconfig) (glob)
--- a/tests/test-fncache.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-fncache.t	Tue Nov 07 15:21:11 2023 +0100
@@ -275,7 +275,7 @@
   > 
   > def uisetup(ui):
   >     extensions.wrapfunction(
-  >         localrepo.localrepository, b'transaction', wrapper)
+  >         localrepo.localrepository, 'transaction', wrapper)
   > 
   > cmdtable = {}
   > 
--- a/tests/test-generaldelta.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-generaldelta.t	Tue Nov 07 15:21:11 2023 +0100
@@ -74,8 +74,8 @@
   $ cd client
   $ hg pull -q ../server -r 4
   $ hg debugdeltachain x
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0      -1      -1       1        1       -1    base          3          2          3   1.50000         3         0    0.00000
+      rev      p1      p2  chain# chainlen     prev   delta
+        0      -1      -1       1        1       -1    base
 
   $ cd ..
 
@@ -104,23 +104,23 @@
   updating to branch default
   3 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R repo debugdeltachain -m
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0      -1      -1       1        1       -1    base        10?        135        10?   0.7????       10?         0    0.00000 (glob)
-        1       0      -1       1        2        0    prev         57        135        1??   1.?????       16?         0    0.00000 (glob)
-        2       0      -1       1        3        1    prev         57        135        2??   1.6????       2??         0    0.00000 (glob)
-        3       0      -1       2        1       -1    base        104        135        104   0.77037       104         0    0.00000
+      rev      p1      p2  chain# chainlen     prev   delta
+        0      -1      -1       1        1       -1    base
+        1       0      -1       1        2        0    prev
+        2       0      -1       1        3        1    prev
+        3       0      -1       2        1       -1    base
   $ hg -R usegd debugdeltachain -m
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0      -1      -1       1        1       -1    base        10?        135        10?   0.7????       10?         0    0.00000 (glob)
-        1       0      -1       1        2        0      p1         57        135        16?   1.?????       16?         0    0.00000 (glob)
-        2       0      -1       1        3        1    prev         57        135        2??   1.6????       2??         0    0.00000 (glob)
-        3       0      -1       1        2        0      p1         57        135        16?   1.?????       27?       114    0.????? (glob)
+      rev      p1      p2  chain# chainlen     prev   delta
+        0      -1      -1       1        1       -1    base
+        1       0      -1       1        2        0      p1
+        2       0      -1       1        3        1    prev
+        3       0      -1       1        2        0      p1
   $ hg -R full debugdeltachain -m
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0      -1      -1       1        1       -1    base        10?        135        10?   0.7????       10?         0    0.00000 (glob)
-        1       0      -1       1        2        0      p1         57        135        16?   1.?????       16?         0    0.00000 (glob)
-        2       0      -1       1        2        0      p1         57        135        16?   1.?????       2??        57    0.3???? (glob)
-        3       0      -1       1        2        0      p1         57        135        16?   1.?????       27?       114    0.????? (glob)
+      rev      p1      p2  chain# chainlen     prev   delta
+        0      -1      -1       1        1       -1    base
+        1       0      -1       1        2        0      p1
+        2       0      -1       1        2        0      p1
+        3       0      -1       1        2        0      p1
 
 Test revlog.optimize-delta-parent-choice
 
@@ -140,10 +140,10 @@
   $ hg merge -q 0
   $ hg commit -q -m merge
   $ hg debugdeltachain -m
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0      -1      -1       1        1       -1    base         ??        215         ??   0.?????        ??         0    0.00000 (glob)
-        1      -1      -1       1        2        0    prev         ??         86        1??   1.?????       1??         0    0.00000 (glob)
-        2       1       0       1        2        0      p2         ??        301        1??   0.4????       ???        ??    0.5???? (glob)
+      rev      p1      p2  chain# chainlen     prev   delta
+        0      -1      -1       1        1       -1    base
+        1      -1      -1       1        2        0    prev
+        2       1       0       1        2        0      p2
 
   $ hg strip -q -r . --config extensions.strip=
 
@@ -152,10 +152,10 @@
   $ hg merge -q 0
   $ hg commit -q -m merge --config storage.revlog.optimize-delta-parent-choice=yes
   $ hg debugdeltachain -m
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0      -1      -1       1        1       -1    base         ??        215         ??   0.?????        ??         0    0.00000 (glob)
-        1      -1      -1       1        2        0    prev         ??         86        1??   1.?????       1??         0    0.00000 (glob)
-        2       1       0       1        2        0      p2         ??        301        1??   0.4????       ???        ??    0.5???? (glob)
+      rev      p1      p2  chain# chainlen     prev   delta
+        0      -1      -1       1        1       -1    base
+        1      -1      -1       1        2        0    prev
+        2       1       0       1        2        0      p2
 
 Test that strip bundle use bundle2
   $ hg --config extensions.strip= strip .
@@ -215,7 +215,7 @@
   0 files updated, 0 files merged, 14 files removed, 0 files unresolved
   $ 
   $ cd ..
-  $ hg -R source-repo debugdeltachain -m
+  $ hg -R source-repo debugdeltachain -m --all-info
       rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
         0      -1      -1       1        1       -1    base         46         45         46   1.02222        46         0    0.00000
         1       0      -1       1        2        0      p1         57         90        103   1.14444       103         0    0.00000
@@ -281,7 +281,7 @@
   new changesets 61246295ee1e:c930ac4a5b32
   updating to branch default
   14 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R relax-chain debugdeltachain -m
+  $ hg -R relax-chain debugdeltachain -m --all-info
       rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
         0      -1      -1       1        1       -1    base         46         45         46   1.02222        46         0    0.00000
         1       0      -1       1        2        0      p1         57         90        103   1.14444       103         0    0.00000
@@ -347,7 +347,7 @@
   new changesets 61246295ee1e:c930ac4a5b32
   updating to branch default
   14 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R noconst-chain debugdeltachain -m
+  $ hg -R noconst-chain debugdeltachain -m --all-info
       rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
         0      -1      -1       1        1       -1    base         46         45         46   1.02222        46         0    0.00000
         1       0      -1       1        2        0      p1         57         90        103   1.14444       103         0    0.00000
--- a/tests/test-globalopts.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-globalopts.t	Tue Nov 07 15:21:11 2023 +0100
@@ -378,6 +378,8 @@
   
   Repository maintenance:
   
+   admin::verify
+                 verify the integrity of the repository
    manifest      output the current or given revision of the project manifest
    recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
@@ -513,6 +515,8 @@
   
   Repository maintenance:
   
+   admin::verify
+                 verify the integrity of the repository
    manifest      output the current or given revision of the project manifest
    recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
--- a/tests/test-help-hide.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-help-hide.t	Tue Nov 07 15:21:11 2023 +0100
@@ -77,6 +77,8 @@
   
   Repository maintenance:
   
+   admin::verify
+                 verify the integrity of the repository
    manifest      output the current or given revision of the project manifest
    recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
@@ -216,6 +218,8 @@
   
   Repository maintenance:
   
+   admin::verify
+                 verify the integrity of the repository
    manifest      output the current or given revision of the project manifest
    recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
--- a/tests/test-help.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-help.t	Tue Nov 07 15:21:11 2023 +0100
@@ -129,6 +129,8 @@
   
   Repository maintenance:
   
+   admin::verify
+                 verify the integrity of the repository
    manifest      output the current or given revision of the project manifest
    recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
@@ -260,6 +262,8 @@
   
   Repository maintenance:
   
+   admin::verify
+                 verify the integrity of the repository
    manifest      output the current or given revision of the project manifest
    recover       roll back an interrupted transaction
    verify        verify the integrity of the repository
@@ -604,9 +608,16 @@
   $ hg help ad
   list of commands:
   
+  Working directory management:
+  
    add           add the specified files on the next commit
    addremove     add all new files, delete all missing files
   
+  Repository maintenance:
+  
+   admin::verify
+                 verify the integrity of the repository
+  
   (use 'hg help -v ad' to show built-in aliases and global options)
 
 Test command without options
@@ -626,6 +637,9 @@
       Please see https://mercurial-scm.org/wiki/RepositoryCorruption for more
       information about recovery from corruption of the repository.
   
+      For an alternative UI with a lot more control over the verification
+      process and better error reporting, try 'hg help admin::verify'.
+  
       Returns 0 on success, 1 if errors are encountered.
   
   options:
@@ -2650,6 +2664,13 @@
   add all new files, delete all missing files
   </td></tr>
   <tr><td>
+  <a href="/help/admin::verify">
+  admin::verify
+  </a>
+  </td><td>
+  verify the integrity of the repository
+  </td></tr>
+  <tr><td>
   <a href="/help/archive">
   archive
   </a>
--- a/tests/test-hgweb-json.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-hgweb-json.t	Tue Nov 07 15:21:11 2023 +0100
@@ -2112,6 +2112,10 @@
         "topic": "addremove"
       },
       {
+        "summary": "verify the integrity of the repository",
+        "topic": "admin::verify"
+      },
+      {
         "summary": "create an unversioned archive of a repository revision",
         "topic": "archive"
       },
--- a/tests/test-hgweb.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-hgweb.t	Tue Nov 07 15:21:11 2023 +0100
@@ -876,7 +876,7 @@
   >     except ValueError:
   >         raise error.Abort(b'signal.signal() called in thread?')
   > def uisetup(ui):
-  >    extensions.wrapfunction(signal, b'signal', disabledsig)
+  >    extensions.wrapfunction(signal, 'signal', disabledsig)
   > EOF
 
  by default, signal interrupt should be disabled while making a lock file
--- a/tests/test-hook.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-hook.t	Tue Nov 07 15:21:11 2023 +0100
@@ -991,7 +991,7 @@
   Traceback (most recent call last):
   ModuleNotFoundError: No module named 'hgext_syntaxerror'
   Traceback (most recent call last):
-      raise error.HookLoadError( (py38 !)
+      raise error.HookLoadError(msg, hint=tracebackhint) (py37 !)
   mercurial.error.HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
   abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed
 
@@ -1156,7 +1156,7 @@
   Traceback (most recent call last):
   ModuleNotFoundError: No module named 'hgext_importfail'
   Traceback (most recent call last):
-      raise error.HookLoadError( (py38 !)
+      raise error.HookLoadError(msg, hint=tracebackhint) (py37 !)
   mercurial.error.HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed
   abort: precommit.importfail hook is invalid: import of "importfail" failed
 
--- a/tests/test-http-bad-server.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-http-bad-server.t	Tue Nov 07 15:21:11 2023 +0100
@@ -725,8 +725,6 @@
   $ hg clone http://localhost:$HGPORT/ clone
   requesting all changes
   adding changesets
-  transaction abort!
-  rollback completed
   abort: HTTP request error (incomplete response)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
@@ -759,8 +757,6 @@
   $ hg clone http://localhost:$HGPORT/ clone
   requesting all changes
   adding changesets
-  transaction abort!
-  rollback completed
   abort: HTTP request error (incomplete response*) (glob)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
@@ -795,8 +791,6 @@
   $ hg clone http://localhost:$HGPORT/ clone
   requesting all changes
   adding changesets
-  transaction abort!
-  rollback completed
   abort: HTTP request error (incomplete response)
   (this may be an intermittent network failure; if the error persists, consider contacting the network or server operator)
   [255]
--- a/tests/test-infinitepush-bundlestore.t	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,755 +0,0 @@
-#require no-reposimplestore no-chg
-
-XXX-CHG this test hangs if `hg` is really `chg`. This was hidden by the use of
-`alias hg=chg` by run-tests.py. With that alias removed, the test is revealed
-to be buggy. This needs to be resolved sooner rather than later.
-
-
-Testing the infinitepush extension and the config options provided by it
-
-Create an ondisk bundlestore in .hg/scratchbranches
-  $ . "$TESTDIR/library-infinitepush.sh"
-  $ cp $HGRCPATH $TESTTMP/defaulthgrc
-  $ setupcommon
-  $ mkcommit() {
-  >    echo "$1" > "$1"
-  >    hg add "$1"
-  >    hg ci -m "$1"
-  > }
-  $ hg init repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd repo
-
-Check that we can send a scratch commit to the server and that it does not
-show up in the history there but is stored on disk
-  $ setupserver
-  $ cd ..
-  $ hg clone ssh://user@dummy/repo client -q
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd client
-  $ mkcommit initialcommit
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg push -r .
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: adding changesets
-  remote: adding manifests
-  remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
-  $ mkcommit scratchcommit
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg push -r . -B scratch/mybranch
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 1 commit:
-  remote:     20759b6926ce  scratchcommit
-  $ hg log -G
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  changeset:   1:20759b6926ce
-  |  bookmark:    scratch/mybranch
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     scratchcommit
-  |
-  o  changeset:   0:67145f466344
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     initialcommit
-  
-  $ hg log -G -R ../repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  changeset:   0:67145f466344
-     tag:         tip
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     initialcommit
-  
-  $ find ../repo/.hg/scratchbranches | sort
-  ../repo/.hg/scratchbranches
-  ../repo/.hg/scratchbranches/filebundlestore
-  ../repo/.hg/scratchbranches/filebundlestore/b9
-  ../repo/.hg/scratchbranches/filebundlestore/b9/e1
-  ../repo/.hg/scratchbranches/filebundlestore/b9/e1/b9e1ee5f93fb6d7c42496fc176c09839639dd9cc
-  ../repo/.hg/scratchbranches/index
-  ../repo/.hg/scratchbranches/index/bookmarkmap
-  ../repo/.hg/scratchbranches/index/bookmarkmap/scratch
-  ../repo/.hg/scratchbranches/index/bookmarkmap/scratch/mybranch
-  ../repo/.hg/scratchbranches/index/nodemap
-  ../repo/.hg/scratchbranches/index/nodemap/20759b6926ce827d5a8c73eb1fa9726d6f7defb2
-
-From another client we can get the scratchbranch if we ask for it explicitly
-
-  $ cd ..
-  $ hg clone ssh://user@dummy/repo client2 -q
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd client2
-  $ hg pull -B scratch/mybranch --traceback
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  new changesets 20759b6926ce (1 drafts)
-  (run 'hg update' to get a working copy)
-  $ hg log -G
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  changeset:   1:20759b6926ce
-  |  bookmark:    scratch/mybranch
-  |  tag:         tip
-  |  user:        test
-  |  date:        Thu Jan 01 00:00:00 1970 +0000
-  |  summary:     scratchcommit
-  |
-  @  changeset:   0:67145f466344
-     user:        test
-     date:        Thu Jan 01 00:00:00 1970 +0000
-     summary:     initialcommit
-  
-  $ cd ..
-
-Push to non-scratch bookmark
-
-  $ cd client
-  $ hg up 0
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ mkcommit newcommit
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  created new head
-  $ hg push -r .
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: adding changesets
-  remote: adding manifests
-  remote: adding file changes
-  remote: added 1 changesets with 1 changes to 1 files
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  newcommit public
-  |
-  | o  scratchcommit draft scratch/mybranch
-  |/
-  o  initialcommit public
-  
-
-Push to scratch branch
-  $ cd ../client2
-  $ hg up -q scratch/mybranch
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ mkcommit 'new scratch commit'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg push -r . -B scratch/mybranch
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 2 commits:
-  remote:     20759b6926ce  scratchcommit
-  remote:     1de1d7d92f89  new scratch commit
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  new scratch commit draft scratch/mybranch
-  |
-  o  scratchcommit draft
-  |
-  o  initialcommit public
-  
-  $ scratchnodes
-  1de1d7d92f8965260391d0513fe8a8d5973d3042 bed63daed3beba97fff2e819a148cf415c217a85
-  20759b6926ce827d5a8c73eb1fa9726d6f7defb2 bed63daed3beba97fff2e819a148cf415c217a85
-
-  $ scratchbookmarks
-  scratch/mybranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
-
-Push scratch bookmark with no new revs
-  $ hg push -r . -B scratch/anotherbranch
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 2 commits:
-  remote:     20759b6926ce  scratchcommit
-  remote:     1de1d7d92f89  new scratch commit
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  new scratch commit draft scratch/anotherbranch scratch/mybranch
-  |
-  o  scratchcommit draft
-  |
-  o  initialcommit public
-  
-  $ scratchbookmarks
-  scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
-  scratch/mybranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
-
-Pull scratch and non-scratch bookmark at the same time
-
-  $ hg -R ../repo book newbook
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd ../client
-  $ hg pull -B newbook -B scratch/mybranch --traceback
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  adding remote bookmark newbook
-  added 1 changesets with 1 changes to 2 files
-  new changesets 1de1d7d92f89 (1 drafts)
-  (run 'hg update' to get a working copy)
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  new scratch commit draft scratch/mybranch
-  |
-  | @  newcommit public
-  | |
-  o |  scratchcommit draft
-  |/
-  o  initialcommit public
-  
-
-Push scratch revision without bookmark with --bundle-store
-
-  $ hg up -q tip
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ mkcommit scratchcommitnobook
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  scratchcommitnobook draft
-  |
-  o  new scratch commit draft scratch/mybranch
-  |
-  | o  newcommit public
-  | |
-  o |  scratchcommit draft
-  |/
-  o  initialcommit public
-  
-  $ hg push -r . --bundle-store
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 3 commits:
-  remote:     20759b6926ce  scratchcommit
-  remote:     1de1d7d92f89  new scratch commit
-  remote:     2b5d271c7e0d  scratchcommitnobook
-  $ hg -R ../repo log -G -T '{desc} {phase}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  newcommit public
-  |
-  o  initialcommit public
-  
-
-  $ scratchnodes
-  1de1d7d92f8965260391d0513fe8a8d5973d3042 66fa08ff107451320512817bed42b7f467a1bec3
-  20759b6926ce827d5a8c73eb1fa9726d6f7defb2 66fa08ff107451320512817bed42b7f467a1bec3
-  2b5d271c7e0d25d811359a314d413ebcc75c9524 66fa08ff107451320512817bed42b7f467a1bec3
-
-Test with pushrebase
-  $ mkcommit scratchcommitwithpushrebase
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg push -r . -B scratch/mybranch
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 4 commits:
-  remote:     20759b6926ce  scratchcommit
-  remote:     1de1d7d92f89  new scratch commit
-  remote:     2b5d271c7e0d  scratchcommitnobook
-  remote:     d8c4f54ab678  scratchcommitwithpushrebase
-  $ hg -R ../repo log -G -T '{desc} {phase}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  newcommit public
-  |
-  o  initialcommit public
-  
-  $ scratchnodes
-  1de1d7d92f8965260391d0513fe8a8d5973d3042 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
-  20759b6926ce827d5a8c73eb1fa9726d6f7defb2 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
-  2b5d271c7e0d25d811359a314d413ebcc75c9524 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
-  d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 e3cb2ac50f9e1e6a5ead3217fc21236c84af4397
-
-Change the order of pushrebase and infinitepush
-  $ mkcommit scratchcommitwithpushrebase2
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg push -r . -B scratch/mybranch
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 5 commits:
-  remote:     20759b6926ce  scratchcommit
-  remote:     1de1d7d92f89  new scratch commit
-  remote:     2b5d271c7e0d  scratchcommitnobook
-  remote:     d8c4f54ab678  scratchcommitwithpushrebase
-  remote:     6c10d49fe927  scratchcommitwithpushrebase2
-  $ hg -R ../repo log -G -T '{desc} {phase}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  newcommit public
-  |
-  o  initialcommit public
-  
-  $ scratchnodes
-  1de1d7d92f8965260391d0513fe8a8d5973d3042 cd0586065eaf8b483698518f5fc32531e36fd8e0
-  20759b6926ce827d5a8c73eb1fa9726d6f7defb2 cd0586065eaf8b483698518f5fc32531e36fd8e0
-  2b5d271c7e0d25d811359a314d413ebcc75c9524 cd0586065eaf8b483698518f5fc32531e36fd8e0
-  6c10d49fe92751666c40263f96721b918170d3da cd0586065eaf8b483698518f5fc32531e36fd8e0
-  d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 cd0586065eaf8b483698518f5fc32531e36fd8e0
-
-Non-fastforward scratch bookmark push
-
-  $ hg log -GT "{rev}:{node} {desc}\n"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  6:6c10d49fe92751666c40263f96721b918170d3da scratchcommitwithpushrebase2
-  |
-  o  5:d8c4f54ab678fd67cb90bb3f272a2dc6513a59a7 scratchcommitwithpushrebase
-  |
-  o  4:2b5d271c7e0d25d811359a314d413ebcc75c9524 scratchcommitnobook
-  |
-  o  3:1de1d7d92f8965260391d0513fe8a8d5973d3042 new scratch commit
-  |
-  | o  2:91894e11e8255bf41aa5434b7b98e8b2aa2786eb newcommit
-  | |
-  o |  1:20759b6926ce827d5a8c73eb1fa9726d6f7defb2 scratchcommit
-  |/
-  o  0:67145f4663446a9580364f70034fea6e21293b6f initialcommit
-  
-  $ hg up 6c10d49fe927
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ echo 1 > amend
-  $ hg add amend
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg ci --amend -m 'scratch amended commit'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  saved backup bundle to $TESTTMP/client/.hg/strip-backup/6c10d49fe927-c99ffec5-amend.hg
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  scratch amended commit draft scratch/mybranch
-  |
-  o  scratchcommitwithpushrebase draft
-  |
-  o  scratchcommitnobook draft
-  |
-  o  new scratch commit draft
-  |
-  | o  newcommit public
-  | |
-  o |  scratchcommit draft
-  |/
-  o  initialcommit public
-  
-
-  $ scratchbookmarks
-  scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
-  scratch/mybranch 6c10d49fe92751666c40263f96721b918170d3da
-  $ hg push -r . -B scratch/mybranch
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 5 commits:
-  remote:     20759b6926ce  scratchcommit
-  remote:     1de1d7d92f89  new scratch commit
-  remote:     2b5d271c7e0d  scratchcommitnobook
-  remote:     d8c4f54ab678  scratchcommitwithpushrebase
-  remote:     8872775dd97a  scratch amended commit
-  $ scratchbookmarks
-  scratch/anotherbranch 1de1d7d92f8965260391d0513fe8a8d5973d3042
-  scratch/mybranch 8872775dd97a750e1533dc1fbbca665644b32547
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  scratch amended commit draft scratch/mybranch
-  |
-  o  scratchcommitwithpushrebase draft
-  |
-  o  scratchcommitnobook draft
-  |
-  o  new scratch commit draft
-  |
-  | o  newcommit public
-  | |
-  o |  scratchcommit draft
-  |/
-  o  initialcommit public
-  
-Check that the push path is not ignored. Add a new path to the hgrc
-  $ cat >> .hg/hgrc << EOF
-  > [paths]
-  > peer=ssh://user@dummy/client2
-  > EOF
-
-Check out the last non-scratch commit
-  $ hg up 91894e11e8255
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  1 files updated, 0 files merged, 6 files removed, 0 files unresolved
-  $ mkcommit peercommit
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-Use --force because this push creates a new head
-  $ hg push peer -r . -f
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/client2
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: adding changesets
-  remote: adding manifests
-  remote: adding file changes
-  remote: added 2 changesets with 2 changes to 2 files (+1 heads)
-  $ hg -R ../repo log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  newcommit public
-  |
-  o  initialcommit public
-  
-  $ hg -R ../client2 log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  peercommit public
-  |
-  o  newcommit public
-  |
-  | @  new scratch commit draft scratch/anotherbranch scratch/mybranch
-  | |
-  | o  scratchcommit draft
-  |/
-  o  initialcommit public
-  
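The directory listings in the removed test above show how the bundlestore
shards bundle files by the first two hex-digit pairs of their hash (e.g.
filebundlestore/b9/e1/b9e1ee5f93fb...). A hedged sketch of computing that
path:

    import os

    def filebundlestore_path(repo_root, bundlehash):
        # shard by the first two hex-digit pairs of the bundle hash,
        # matching the .hg/scratchbranches layout listed above
        return os.path.join(repo_root, '.hg', 'scratchbranches',
                            'filebundlestore',
                            bundlehash[0:2], bundlehash[2:4], bundlehash)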
--- a/tests/test-infinitepush-ci.t	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,579 +0,0 @@
-#require no-reposimplestore
-
-Testing the case when there is no infinitepush extension present on the client
-side and the server routes each push to the bundlestore. This case is very
-similar to the CI use case.
-
-Setup
------
-
-  $ . "$TESTDIR/library-infinitepush.sh"
-  $ cat >> $HGRCPATH <<EOF
-  > [alias]
-  > glog = log -GT "{rev}:{node|short} {desc}\n{phase}"
-  > EOF
-  $ cp $HGRCPATH $TESTTMP/defaulthgrc
-  $ hg init repo
-  $ cd repo
-  $ setupserver
-  $ echo "pushtobundlestore = True" >> .hg/hgrc
-  $ echo "[extensions]" >> .hg/hgrc
-  $ echo "infinitepush=" >> .hg/hgrc
-  $ echo "[infinitepush]" >> .hg/hgrc
-  $ echo "deprecation-abort=no" >> .hg/hgrc
-  $ echo initialcommit > initialcommit
-  $ hg ci -Aqm "initialcommit"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact (chg !)
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be (chg !)
-  unused and barring learning of users of this functionality, we drop this (chg !)
-  extension in Mercurial 6.6. (chg !)
-  $ hg phase --public .
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-
-  $ cd ..
-  $ hg clone repo client -q
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg clone repo client2 -q
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg clone ssh://user@dummy/repo client3 -q
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  $ cd client
-
-Pushing a new commit from the client to the server
------------------------------------------------------
-
-  $ echo foobar > a
-  $ hg ci -Aqm "added a"
-  $ hg glog
-  @  1:6cb0989601f1 added a
-  |  draft
-  o  0:67145f466344 initialcommit
-     public
-
-  $ hg push
-  pushing to $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  storing changesets on the bundlestore
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing 1 commit:
-      6cb0989601f1  added a
-
-  $ scratchnodes
-  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
-
-Understanding how data is stored on the bundlestore in server
--------------------------------------------------------------
-
-There are two parts: filebundlestore and index
-  $ ls ../repo/.hg/scratchbranches
-  filebundlestore
-  index
-
-filebundlestore stores the bundles
-  $ ls ../repo/.hg/scratchbranches/filebundlestore/3b/41/
-  3b414252ff8acab801318445d88ff48faf4a28c3
-
-index/nodemap maps each node id to the file in filebundlestore in which its bundle is stored
-  $ ls ../repo/.hg/scratchbranches/index/
-  nodemap
-  $ ls ../repo/.hg/scratchbranches/index/nodemap/
-  6cb0989601f1fb5805238edfb16f3606713d9a0b
-
-  $ cd ../repo
-
-Checking that the commit was not applied to the revlog on the server
-------------------------------------------------------------------
-
-  $ hg glog
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  @  0:67145f466344 initialcommit
-     public
-
-Applying the changeset from the bundlestore
---------------------------------------------
-
-  $ hg unbundle .hg/scratchbranches/filebundlestore/3b/41/3b414252ff8acab801318445d88ff48faf4a28c3
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  new changesets 6cb0989601f1
-  (run 'hg update' to get a working copy)
-
-  $ hg glog
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-Pushing more changesets from the local repo
---------------------------------------------
-
-  $ cd ../client
-  $ echo b > b
-  $ hg ci -Aqm "added b"
-  $ echo c > c
-  $ hg ci -Aqm "added c"
-  $ hg glog
-  @  3:bf8a6e3011b3 added c
-  |  draft
-  o  2:eaba929e866c added b
-  |  draft
-  o  1:6cb0989601f1 added a
-  |  public
-  o  0:67145f466344 initialcommit
-     public
-
-  $ hg push
-  pushing to $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  storing changesets on the bundlestore
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing 2 commits:
-      eaba929e866c  added b
-      bf8a6e3011b3  added c
-
-Checking that changesets are not applied on the server
-------------------------------------------------------
-
-  $ hg glog -R ../repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-Both of the new changesets are stored in a single bundle-file
-  $ scratchnodes
-  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a 239585f5e61f0c09ce7106bdc1097bff731738f4
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 239585f5e61f0c09ce7106bdc1097bff731738f4
-
-Pushing more changesets to the server
--------------------------------------
-
-  $ echo d > d
-  $ hg ci -Aqm "added d"
-  $ echo e > e
-  $ hg ci -Aqm "added e"
-
-XXX: we should have pushed only the parts which are not in the bundlestore
-  $ hg push
-  pushing to $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  storing changesets on the bundlestore
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing 4 commits:
-      eaba929e866c  added b
-      bf8a6e3011b3  added c
-      1bb96358eda2  added d
-      b4e4bce66051  added e
-
-Sneak peek into the bundlestore at the server
-  $ scratchnodes
-  1bb96358eda285b536c6d1c66846a7cdb2336cea 98fbae0016662521b0007da1b7bc349cd3caacd1
-  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
-  b4e4bce660512ad3e71189e14588a70ac8e31fef 98fbae0016662521b0007da1b7bc349cd3caacd1
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a 98fbae0016662521b0007da1b7bc349cd3caacd1
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 98fbae0016662521b0007da1b7bc349cd3caacd1
-
-Checking if `hg pull` pulls something or `hg incoming` shows something
------------------------------------------------------------------------
-
-  $ hg incoming
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  comparing with $TESTTMP/repo
-  searching for changes
-  no changes found
-  [1]
-
-  $ hg pull
-  pulling from $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  no changes found
-
-Pulling from second client which is a localpeer to test `hg pull -r <rev>`
---------------------------------------------------------------------------
-
-Pulling the revision which is applied
-
-  $ cd ../client2
-  $ hg pull -r 6cb0989601f1
-  pulling from $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  new changesets 6cb0989601f1
-  (run 'hg update' to get a working copy)
-  $ hg glog
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-Pulling the revision which is in bundlestore
-XXX: we should support pulling revisions from a local peer's bundlestore without
-client-side wrapping
-
-  $ hg pull -r b4e4bce660512ad3e71189e14588a70ac8e31fef
-  pulling from $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  abort: unknown revision 'b4e4bce660512ad3e71189e14588a70ac8e31fef'
-  [10]
-  $ hg glog
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-  $ cd ../client
-
-Pulling from third client which is not a localpeer
----------------------------------------------------
-
-Pulling the revision which is applied
-
-  $ cd ../client3
-  $ hg pull -r 6cb0989601f1
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  new changesets 6cb0989601f1
-  (run 'hg update' to get a working copy)
-  $ hg glog
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-Pulling the revision which is in bundlestore
-
-Trying to specify short hash
-XXX: we should support this
-  $ hg pull -r b4e4bce660512
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  abort: unknown revision 'b4e4bce660512'
-  [255]
-
-XXX: we should show a better message when the pull is happening from the bundlestore
-  $ hg pull -r b4e4bce660512ad3e71189e14588a70ac8e31fef
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  added 4 changesets with 4 changes to 4 files
-  new changesets eaba929e866c:b4e4bce66051
-  (run 'hg update' to get a working copy)
-  $ hg glog
-  o  5:b4e4bce66051 added e
-  |  public
-  o  4:1bb96358eda2 added d
-  |  public
-  o  3:bf8a6e3011b3 added c
-  |  public
-  o  2:eaba929e866c added b
-  |  public
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-  $ cd ../client
-
-Checking storage of phase information with the bundle on bundlestore
----------------------------------------------------------------------
-
-creating a draft commit
-  $ cat >> $HGRCPATH <<EOF
-  > [phases]
-  > publish = False
-  > EOF
-  $ echo f > f
-  $ hg ci -Aqm "added f"
-  $ hg glog -r '.^::'
-  @  6:9b42578d4447 added f
-  |  draft
-  o  5:b4e4bce66051 added e
-  |  public
-  ~
-
-  $ hg push
-  pushing to $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  storing changesets on the bundlestore
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing 5 commits:
-      eaba929e866c  added b
-      bf8a6e3011b3  added c
-      1bb96358eda2  added d
-      b4e4bce66051  added e
-      9b42578d4447  added f
-
-XXX: the phase of 9b42578d4447 should not be changed here
-  $ hg glog -r .
-  @  6:9b42578d4447 added f
-  |  public
-  ~
-
-applying the bundle on the server to check preservation of phase information
-
-  $ cd ../repo
-  $ scratchnodes
-  1bb96358eda285b536c6d1c66846a7cdb2336cea 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
-  9b42578d44473575994109161430d65dd147d16d 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  b4e4bce660512ad3e71189e14588a70ac8e31fef 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 280a46a259a268f0e740c81c5a7751bdbfaec85f
-
-  $ hg unbundle .hg/scratchbranches/filebundlestore/28/0a/280a46a259a268f0e740c81c5a7751bdbfaec85f
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  added 5 changesets with 5 changes to 5 files
-  new changesets eaba929e866c:9b42578d4447 (1 drafts)
-  (run 'hg update' to get a working copy)
-
-  $ hg glog
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  6:9b42578d4447 added f
-  |  draft
-  o  5:b4e4bce66051 added e
-  |  public
-  o  4:1bb96358eda2 added d
-  |  public
-  o  3:bf8a6e3011b3 added c
-  |  public
-  o  2:eaba929e866c added b
-  |  public
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-Checking storage of obsmarkers in the bundlestore
---------------------------------------------------
-
-enabling obsmarkers and the rebase extension
-
-  $ cat >> $HGRCPATH << EOF
-  > [experimental]
-  > evolution = all
-  > [extensions]
-  > rebase =
-  > EOF
-
-  $ cd ../client
-
-  $ hg phase -r . --draft --force
-  $ hg rebase -r 6 -d 3
-  rebasing 6:9b42578d4447 tip "added f"
-
-  $ hg glog
-  @  7:99949238d9ac added f
-  |  draft
-  | o  5:b4e4bce66051 added e
-  | |  public
-  | o  4:1bb96358eda2 added d
-  |/   public
-  o  3:bf8a6e3011b3 added c
-  |  public
-  o  2:eaba929e866c added b
-  |  public
-  o  1:6cb0989601f1 added a
-  |  public
-  o  0:67145f466344 initialcommit
-     public
-
-  $ hg push -f
-  pushing to $TESTTMP/repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  storing changesets on the bundlestore
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing 1 commit:
-      99949238d9ac  added f
-
-XXX: the phase should not have changed here
-  $ hg glog -r .
-  @  7:99949238d9ac added f
-  |  public
-  ~
-
-Unbundling on server to see obsmarkers being applied
-
-  $ cd ../repo
-
-  $ scratchnodes
-  1bb96358eda285b536c6d1c66846a7cdb2336cea 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  6cb0989601f1fb5805238edfb16f3606713d9a0b 3b414252ff8acab801318445d88ff48faf4a28c3
-  99949238d9ac7f2424a33a46dface6f866afd059 090a24fe63f31d3b4bee714447f835c8c362ff57
-  9b42578d44473575994109161430d65dd147d16d 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  b4e4bce660512ad3e71189e14588a70ac8e31fef 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  bf8a6e3011b345146bbbedbcb1ebd4837571492a 280a46a259a268f0e740c81c5a7751bdbfaec85f
-  eaba929e866c59bc9a6aada5a9dd2f6990db83c0 280a46a259a268f0e740c81c5a7751bdbfaec85f
-
-  $ hg glog
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact (chg !)
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be (chg !)
-  unused and barring learning of users of this functionality, we drop this (chg !)
-  extension in Mercurial 6.6. (chg !)
-  o  6:9b42578d4447 added f
-  |  draft
-  o  5:b4e4bce66051 added e
-  |  public
-  o  4:1bb96358eda2 added d
-  |  public
-  o  3:bf8a6e3011b3 added c
-  |  public
-  o  2:eaba929e866c added b
-  |  public
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
-
-  $ hg unbundle .hg/scratchbranches/filebundlestore/09/0a/090a24fe63f31d3b4bee714447f835c8c362ff57
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 0 changes to 1 files (+1 heads)
-  1 new obsolescence markers
-  obsoleted 1 changesets
-  new changesets 99949238d9ac (1 drafts)
-  (run 'hg heads' to see heads, 'hg merge' to merge)
-
-  $ hg glog
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  7:99949238d9ac added f
-  |  draft
-  | o  5:b4e4bce66051 added e
-  | |  public
-  | o  4:1bb96358eda2 added d
-  |/   public
-  o  3:bf8a6e3011b3 added c
-  |  public
-  o  2:eaba929e866c added b
-  |  public
-  o  1:6cb0989601f1 added a
-  |  public
-  @  0:67145f466344 initialcommit
-     public
--- a/tests/test-infinitepush.t	Mon Nov 06 15:38:27 2023 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,472 +0,0 @@
-#require no-reposimplestore no-chg
-
-XXX-CHG this test hangs if `hg` is really `chg`. This was hidden by the use of
-`alias hg=chg` by run-tests.py. With such alias removed, this test is revealed
-buggy. This need to be resolved sooner than later.
-
-
-Testing infinipush extension and the confi options provided by it
-
-Setup
-
-  $ . "$TESTDIR/library-infinitepush.sh"
-  $ cp $HGRCPATH $TESTTMP/defaulthgrc
-  $ setupcommon
-  $ hg init repo
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd repo
-  $ setupserver
-  $ echo initialcommit > initialcommit
-  $ hg ci -Aqm "initialcommit"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg phase --public .
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-
-  $ cd ..
-  $ hg clone ssh://user@dummy/repo client -q
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-
-Create two heads. Push first head alone, then two heads together. Make sure that
-multihead push works.
-  $ cd client
-  $ echo multihead1 > multihead1
-  $ hg add multihead1
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg ci -m "multihead1"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg up null
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
-  $ echo multihead2 > multihead2
-  $ hg ci -Am "multihead2"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  adding multihead2
-  created new head
-  $ hg push -r . --bundle-store
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 1 commit:
-  remote:     ee4802bf6864  multihead2
-  $ hg push -r '1:2' --bundle-store
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 2 commits:
-  remote:     bc22f9a30a82  multihead1
-  remote:     ee4802bf6864  multihead2
-  $ scratchnodes
-  bc22f9a30a821118244deacbd732e394ed0b686c de1b7d132ba98f0172cd974e3e69dfa80faa335c
-  ee4802bf6864326a6b3dcfff5a03abc2a0a69b8f de1b7d132ba98f0172cd974e3e69dfa80faa335c
-
-Create two new scratch bookmarks
-  $ hg up 0
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ echo scratchfirstpart > scratchfirstpart
-  $ hg ci -Am "scratchfirstpart"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  adding scratchfirstpart
-  created new head
-  $ hg push -r . -B scratch/firstpart
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 1 commit:
-  remote:     176993b87e39  scratchfirstpart
-  $ hg up 0
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-  $ echo scratchsecondpart > scratchsecondpart
-  $ hg ci -Am "scratchsecondpart"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  adding scratchsecondpart
-  created new head
-  $ hg push -r . -B scratch/secondpart
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 1 commit:
-  remote:     8db3891c220e  scratchsecondpart
-
-Pull two bookmarks from the second client
-  $ cd ..
-  $ hg clone ssh://user@dummy/repo client2 -q
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd client2
-  $ hg pull -B scratch/firstpart -B scratch/secondpart
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets * (glob)
-  (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg log -r scratch/secondpart -T '{node}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  8db3891c220e216f6da214e8254bd4371f55efca (no-eol)
-  $ hg log -r scratch/firstpart -T '{node}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  176993b87e39bd88d66a2cccadabe33f0b346339 (no-eol)
-Make two commits to the scratch branch
-
-  $ echo testpullbycommithash1 > testpullbycommithash1
-  $ hg ci -Am "testpullbycommithash1"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  adding testpullbycommithash1
-  created new head
-  $ hg log -r '.' -T '{node}\n' > ../testpullbycommithash1
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ echo testpullbycommithash2 > testpullbycommithash2
-  $ hg ci -Aqm "testpullbycommithash2"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg push -r . -B scratch/mybranch -q
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-
-Create third client and pull by commit hash.
-Make sure testpullbycommithash2 has not fetched
-  $ cd ..
-  $ hg clone ssh://user@dummy/repo client3 -q
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd client3
-  $ hg pull -r `cat ../testpullbycommithash1`
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files
-  new changesets 33910bfe6ffe (1 drafts)
-  (run 'hg update' to get a working copy)
-  $ hg log -G -T '{desc} {phase} {bookmarks}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  testpullbycommithash1 draft
-  |
-  @  initialcommit public
-  
-Make public commit in the repo and pull it.
-Make sure phase on the client is public.
-  $ cd ../repo
-  $ echo publiccommit > publiccommit
-  $ hg ci -Aqm "publiccommit"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg phase --public .
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ cd ../client3
-  $ hg pull
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 1 changesets with 1 changes to 1 files (+1 heads)
-  new changesets a79b6597f322
-  (run 'hg heads' to see heads, 'hg merge' to merge)
-  $ hg log -G -T '{desc} {phase} {bookmarks} {node|short}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  o  publiccommit public  a79b6597f322
-  |
-  | o  testpullbycommithash1 draft  33910bfe6ffe
-  |/
-  @  initialcommit public  67145f466344
-  
-  $ hg up a79b6597f322
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ echo scratchontopofpublic > scratchontopofpublic
-  $ hg ci -Aqm "scratchontopofpublic"
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  $ hg push -r . -B scratch/scratchontopofpublic
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pushing to ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  remote: pushing 1 commit:
-  remote:     c70aee6da07d  scratchontopofpublic
-  $ cd ../client2
-  $ hg pull -B scratch/scratchontopofpublic
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  pulling from ssh://user@dummy/repo
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  searching for changes
-  remote: IMPORTANT: if you use this extension, please contact
-  remote: mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  remote: unused and barring learning of users of this functionality, we drop this
-  remote: extension in Mercurial 6.6.
-  adding changesets
-  adding manifests
-  adding file changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 2 changesets with 2 changes to 2 files (+1 heads)
-  new changesets a79b6597f322:c70aee6da07d (1 drafts)
-  (run 'hg heads .' to see heads, 'hg merge' to merge)
-  $ hg log -r scratch/scratchontopofpublic -T '{phase}'
-  IMPORTANT: if you use this extension, please contact
-  mercurial-devel@mercurial-scm.org IMMEDIATELY. This extension is believed to be
-  unused and barring learning of users of this functionality, we drop this
-  extension in Mercurial 6.6.
-  draft (no-eol)
--- a/tests/test-journal.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-journal.t	Tue Nov 07 15:21:11 2023 +0100
@@ -84,6 +84,17 @@
   cb9a9f314b8b  book -f bar
   1e6c11564562  book -r tip bar
 
+Test that we track bookmark deletion
+
+  $ hg book -r . babar
+  $ hg book -f -r .~1 babar
+  $ hg book -d babar
+  $ hg journal babar
+  previous locations of 'babar':
+  000000000000  book -d babar
+  cb9a9f314b8b  book -f -r '.~1' babar
+  1e6c11564562  book -r . babar
+
 Test that bookmarks and working copy tracking is not mixed
 
   $ hg journal
@@ -99,6 +110,9 @@
   $ hg journal --all
   previous locations of the working copy and bookmarks:
   1e6c11564562  baz       book -r tip baz
+  000000000000  babar     book -d babar
+  cb9a9f314b8b  babar     book -f -r '.~1' babar
+  1e6c11564562  babar     book -r . babar
   1e6c11564562  bar       up
   1e6c11564562  .         up
   cb9a9f314b8b  bar       book -f bar
@@ -127,6 +141,9 @@
   $ hg journal "re:ba."
   previous locations of 're:ba.':
   1e6c11564562  baz       book -r tip baz
+  000000000000  babar     book -d babar
+  cb9a9f314b8b  babar     book -f -r '.~1' babar
+  1e6c11564562  babar     book -r . babar
   1e6c11564562  bar       up
   cb9a9f314b8b  bar       book -f bar
   1e6c11564562  bar       book -r tip bar
@@ -136,6 +153,9 @@
   $ hg journal --verbose --all
   previous locations of the working copy and bookmarks:
   000000000000 -> 1e6c11564562 foobar    baz      1970-01-01 00:00 +0000  book -r tip baz
+  cb9a9f314b8b -> 000000000000 foobar    babar    1970-01-01 00:00 +0000  book -d babar
+  1e6c11564562 -> cb9a9f314b8b foobar    babar    1970-01-01 00:00 +0000  book -f -r '.~1' babar
+  000000000000 -> 1e6c11564562 foobar    babar    1970-01-01 00:00 +0000  book -r . babar
   cb9a9f314b8b -> 1e6c11564562 foobar    bar      1970-01-01 00:00 +0000  up
   cb9a9f314b8b -> 1e6c11564562 foobar    .        1970-01-01 00:00 +0000  up
   1e6c11564562 -> cb9a9f314b8b foobar    bar      1970-01-01 00:00 +0000  book -f bar
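
Aside on the new expectations above: deleting a bookmark is journaled as
a move to the null node, which is why '000000000000  book -d babar'
appears as the most recent location. A tiny sketch of reading that back
(the parsing helper is hypothetical; only the output format comes from
the test above):

    NULLID = '0' * 12

    def is_deletion(journal_line):
        # a bookmark deletion shows the null node as the new location,
        # e.g. '000000000000  book -d babar'
        return journal_line.split()[0] == NULLID
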
--- a/tests/test-lfs.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-lfs.t	Tue Nov 07 15:21:11 2023 +0100
@@ -829,7 +829,7 @@
   > eh = exthelper.exthelper()
   > uisetup = eh.finaluisetup
   > 
-  > @eh.wrapfunction(wrapper, b'filelogrenamed')
+  > @eh.wrapfunction(wrapper, 'filelogrenamed')
   > def filelogrenamed(orig, orig1, self, node):
   >     ret = orig(orig1, self, node)
   >     if wrapper._islfs(self._revlog, node) and ret:
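
This hunk, and several below, track an API cleanup: extensions.wrapfunction
now takes the wrapped attribute's name as a native str rather than bytes.
A minimal sketch of the calling convention, modeled on the test extensions
later in this patch (exchange.push is the example target used there):

    from mercurial import exchange, extensions

    def wrappedpush(orig, repo, *args, **kwargs):
        # 'orig' is the original exchange.push; forward the call unchanged
        return orig(repo, *args, **kwargs)

    def extsetup(ui):
        # the attribute name is now a native str ('push'), not b'push'
        extensions.wrapfunction(exchange, 'push', wrappedpush)
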
--- a/tests/test-narrow-expanddirstate.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-narrow-expanddirstate.t	Tue Nov 07 15:21:11 2023 +0100
@@ -99,7 +99,7 @@
   >       expandnarrowspec(ui, repo, encoding.environ.get(b'PATCHINCLUDES'))
   >       return orig(ui, repo, *args, **kwargs)
   > 
-  >   extensions.wrapfunction(patch, b'patch', overridepatch)
+  >   extensions.wrapfunction(patch, 'patch', overridepatch)
   > EOF
   $ cat >> ".hg/hgrc" <<EOF
   > [extensions]
--- a/tests/test-parseindex.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-parseindex.t	Tue Nov 07 15:21:11 2023 +0100
@@ -155,9 +155,9 @@
        1 0000       65      1      0      2 26333235a41c
 
   $ hg -R limit debugdeltachain -c
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0       2      -1       1        1       -1    base         63         62         63   1.01613        63         0    0.00000
-        1       0       2       2        1       -1    base         66         65         66   1.01538        66         0    0.00000
+      rev      p1      p2  chain# chainlen     prev   delta
+        0       2      -1       1        1       -1    base
+        1       0       2       2        1       -1    base
 
   $ hg -R neglimit debugrevlogindex -f1 -c
      rev flag     size   link     p1     p2       nodeid
@@ -170,9 +170,9 @@
        1 0000       65      1      0  65536 26333235a41c
 
   $ hg -R segv debugdeltachain -c
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio
-        0   65536      -1       1        1       -1    base         63         62         63   1.01613        63         0    0.00000
-        1       0   65536       2        1       -1    base         66         65         66   1.01538        66         0    0.00000
+      rev      p1      p2  chain# chainlen     prev   delta
+        0   65536      -1       1        1       -1    base
+        1       0   65536       2        1       -1    base
 
   $ cat <<EOF > test.py
   > import sys
--- a/tests/test-phases.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-phases.t	Tue Nov 07 15:21:11 2023 +0100
@@ -1000,6 +1000,23 @@
      date:        Thu Jan 01 00:00:00 1970 +0000
      summary:     A
   
+The hidden commit is an orphan but doesn't show up without --hidden,
+and internal changesets are not considered for instability.
+
+  $ hg debugobsolete `hg id --debug -ir 0`
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  $ hg --hidden log -G -r '(0::) - 0'
+  o  changeset:   1:c01c42dffc7f
+  |  tag:         tip
+  ~  user:        test
+     date:        Thu Jan 01 00:00:00 1970 +0000
+     summary:     my test internal commit
+  
+  $ hg --hidden log -G -r 'unstable()'
+
+  $ hg log -G -r 'unstable()'
+
 
 Test for archived phase
 -----------------------
--- a/tests/test-push-race.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-push-race.t	Tue Nov 07 15:21:11 2023 +0100
@@ -76,7 +76,7 @@
   >     return orig(pushop)
   > 
   > def uisetup(ui):
-  >     extensions.wrapfunction(exchange, b'_pushbundle2', delaypush)
+  >     extensions.wrapfunction(exchange, '_pushbundle2', delaypush)
   > EOF
 
   $ waiton () {
--- a/tests/test-remotefilelog-bundle2-legacy.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-remotefilelog-bundle2-legacy.t	Tue Nov 07 15:21:11 2023 +0100
@@ -11,7 +11,7 @@
   > command = registrar.command(cmdtable)
   > @command('testcg2', norepo=True)
   > def testcg2(ui):
-  >     if not util.safehasattr(changegroup, 'cg2packer'):
+  >     if not hasattr(changegroup, 'cg2packer'):
   >         sys.exit(80)
   > EOF
   $ cat >> $HGRCPATH << EOF
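
The safehasattr-to-hasattr switch in this hunk relies on Python 3
semantics: the built-in hasattr() only treats AttributeError as "attribute
missing", whereas on Python 2 it swallowed every exception, which is what
util.safehasattr() guarded against. A small self-contained illustration
(the Demo class is hypothetical):

    class Demo:
        @property
        def attr(self):
            raise RuntimeError('computed attribute failed')

    # on Python 3 only AttributeError means "missing", so the RuntimeError
    # raised by the property propagates instead of being silently hidden
    try:
        hasattr(Demo(), 'attr')
    except RuntimeError:
        print('non-AttributeError exceptions are not swallowed')
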
--- a/tests/test-revlog-raw.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-revlog-raw.py	Tue Nov 07 15:21:11 2023 +0100
@@ -50,7 +50,7 @@
 tvfs.options = {
     b'generaldelta': True,
     b'revlogv1': True,
-    b'sparse-revlog': True,
+    b'delta-config': revlog.DeltaConfig(sparse_revlog=True),
 }
 
 
@@ -158,7 +158,7 @@
             else:
                 # suboptimal deltaparent
                 deltaparent = min(0, parentrev)
-            if not rlog.candelta(deltaparent, r):
+            if not rlog._candelta(deltaparent, r):
                 deltaparent = -1
             return {
                 b'node': rlog.node(r),
@@ -371,11 +371,15 @@
 
 
 def slicingtest(rlog):
-    oldmin = rlog._srmingapsize
+    old_delta_config = rlog.delta_config
+    old_data_config = rlog.data_config
+    rlog.delta_config = rlog.delta_config.copy()
+    rlog.data_config = rlog.data_config.copy()
     try:
         # the test revlog is small, we remove the floor under which
         # slicing is disregarded.
-        rlog._srmingapsize = 0
+        rlog.data_config.sr_min_gap_size = 0
+        rlog.delta_config.sr_min_gap_size = 0
         for item in slicingdata:
             chain, expected, target = item
             result = deltas.slicechunk(rlog, chain, targetsize=target)
@@ -387,7 +391,8 @@
                 print('  expected: %s' % expected)
                 print('  result:   %s' % result)
     finally:
-        rlog._srmingapsize = oldmin
+        rlog.delta_config = old_delta_config
+        rlog.data_config = old_data_config
 
 
 def md5sum(s):
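
The slicingtest() change above replaces mutation of a private attribute
(_srmingapsize) with a copy-mutate-restore dance on the revlog's config
objects. A generic sketch of that pattern; apart from sr_min_gap_size,
which appears in the hunk above, the DeltaConfig shape and default are
assumed for illustration:

    import dataclasses

    @dataclasses.dataclass
    class DeltaConfig:
        sr_min_gap_size: int = 262144  # illustrative default

        def copy(self):
            return dataclasses.replace(self)

    def without_gap_floor(rlog, fn):
        # hypothetical helper: drop the slicing floor for the duration
        # of fn(rlog), then restore the original config object
        old = rlog.delta_config
        rlog.delta_config = old.copy()
        rlog.delta_config.sr_min_gap_size = 0
        try:
            return fn(rlog)
        finally:
            rlog.delta_config = old
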
--- a/tests/test-revlog.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-revlog.t	Tue Nov 07 15:21:11 2023 +0100
@@ -76,10 +76,10 @@
   $ tar -xf - < "$TESTDIR"/bundles/test-revlog-diff-relative-to-nullrev.tar
   $ cd nullrev-diff
   $ hg debugdeltachain a
-      rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio   readsize largestblk rddensity srchunks
-        0      -1      -1       1        2       -1      p1         15          3         15   5.00000        15         0    0.00000         15         15   1.00000        1
-        1       0      -1       1        2       -1      p2         15          3         15   5.00000        30        15    1.00000         30         30   0.50000        1
-        2      -1      -1       1        2       -1      p1         15          3         15   5.00000        45        30    2.00000         45         45   0.33333        1
+      rev      p1      p2  chain# chainlen     prev   delta
+        0      -1      -1       1        2       -1      p1
+        1       0      -1       1        2       -1      p2
+        2      -1      -1       1        2       -1      p1
   $ hg cat --config rhg.cat=true -r 0 a
   hi
   $ hg cat --config rhg.cat=true -r 1 a
--- a/tests/test-rust-ancestor.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-rust-ancestor.py	Tue Nov 07 15:21:11 2023 +0100
@@ -2,7 +2,6 @@
 import unittest
 
 from mercurial.node import wdirrev
-from mercurial import error
 
 from mercurial.testing import revlog as revlogtesting
 
@@ -144,11 +143,15 @@
 
     def testwdirunsupported(self):
         # trying to access ancestors of the working directory fails
-        # WdirUnsupported directly
         idx = self.parseindex()
-        with self.assertRaises(error.WdirUnsupported):
+        with self.assertRaises(rustext.GraphError) as arc:
             list(AncestorsIterator(idx, [wdirrev], -1, False))
 
+        exc = arc.exception
+        self.assertIsInstance(exc, ValueError)
+        # rust-cpython issues appropriate str instances for Python 2 and 3
+        self.assertEqual(exc.args, ('InvalidRevision', wdirrev))
+
     def testheadrevs(self):
         idx = self.parseindex()
         self.assertEqual(dagop.headrevs(idx, [1, 2, 3]), {3})
--- a/tests/test-sparse-revlog.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-sparse-revlog.t	Tue Nov 07 15:21:11 2023 +0100
@@ -161,7 +161,7 @@
   $ ls -1
   SPARSE-REVLOG-TEST-FILE
   $ hg debugdeltachain SPARSE-REVLOG-TEST-FILE | grep snap | tail -1
-     4971    4970      -1       3        5     4930    snap      19179     346472     427596   1.23414  15994877  15567281   36.40652     427596     179288   1.00000        5
+     4971    4970      -1       3        5     4930    snap
   $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971
   DBG-DELTAS-SEARCH: SEARCH rev=4971
   DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
--- a/tests/test-ssh-bundle1.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-ssh-bundle1.t	Tue Nov 07 15:21:11 2023 +0100
@@ -427,7 +427,7 @@
   >     return res
   > 
   > def extsetup(ui):
-  >     extensions.wrapfunction(exchange, b'push', wrappedpush)
+  >     extensions.wrapfunction(exchange, 'push', wrappedpush)
   > EOF
 
   $ cat >> .hg/hgrc << EOF
--- a/tests/test-ssh.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-ssh.t	Tue Nov 07 15:21:11 2023 +0100
@@ -479,7 +479,7 @@
   >     return res
   > 
   > def extsetup(ui):
-  >     extensions.wrapfunction(exchange, b'push', wrappedpush)
+  >     extensions.wrapfunction(exchange, 'push', wrappedpush)
   > EOF
 
   $ cat >> .hg/hgrc << EOF
--- a/tests/test-strip.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-strip.t	Tue Nov 07 15:21:11 2023 +0100
@@ -970,7 +970,7 @@
   >          transaction.addpostclose(b"phase invalidation test", test)
   >     return transaction
   > def extsetup(ui):
-  >     extensions.wrapfunction(localrepo.localrepository, b"transaction",
+  >     extensions.wrapfunction(localrepo.localrepository, "transaction",
   >                             transactioncallback)
   > EOF
   $ hg up -C 2
--- a/tests/test-transaction-rollback-on-revlog-split.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-transaction-rollback-on-revlog-split.t	Tue Nov 07 15:21:11 2023 +0100
@@ -400,7 +400,6 @@
 The split was rolled back
 
   $ f -s .hg/store/data*/file*
-  .hg/store/data/file.d: size=0
   .hg/store/data/file.i: size=1174
 
   $ hg tip
--- a/tests/test-upgrade-repo.t	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/test-upgrade-repo.t	Tue Nov 07 15:21:11 2023 +0100
@@ -1427,7 +1427,7 @@
   $ hg config format
   format.revlog-compression=$BUNDLE2_COMPRESSIONS$
   format.maxchainlen=9001
-  $ hg debugdeltachain file
+  $ hg debugdeltachain file --all-info
       rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio   readsize largestblk rddensity srchunks
         0      -1      -1       1        1       -1    base         77        182         77   0.42308        77         0    0.00000         77         77   1.00000        1
         1       0      -1       1        2        0      p1         21        191         98   0.51309        98         0    0.00000         98         98   1.00000        1
@@ -1475,7 +1475,7 @@
   removing temporary repository $TESTTMP/localconfig/.hg/upgrade.* (glob)
   copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
   the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
-  $ hg debugdeltachain file
+  $ hg debugdeltachain file --all-info
       rev      p1      p2  chain# chainlen     prev   delta       size    rawsize  chainsize     ratio   lindist extradist extraratio   readsize largestblk rddensity srchunks
         0      -1      -1       1        1       -1    base         77        182         77   0.42308        77         0    0.00000         77         77   1.00000        1
         1       0      -1       1        2        0      p1         21        191         98   0.51309        98         0    0.00000         98         98   1.00000        1
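
These two hunks are the flip side of the shortened debugdeltachain output
in test-parseindex.t, test-revlog.t and test-sparse-revlog.t above: the
detailed size/ratio/density columns now require an explicit --all-info
flag. A small sketch of driving both forms from Python (repository path
and file name are placeholders):

    import subprocess

    def deltachain(repo_path, tracked_file, all_info=False):
        # --all-info restores the full column set that the short
        # default output no longer includes
        cmd = ['hg', '-R', repo_path, 'debugdeltachain', tracked_file]
        if all_info:
            cmd.append('--all-info')
        return subprocess.run(
            cmd, capture_output=True, text=True, check=True
        ).stdout
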
--- a/tests/testlib/ext-sidedata.py	Mon Nov 06 15:38:27 2023 +0100
+++ b/tests/testlib/ext-sidedata.py	Tue Nov 07 15:21:11 2023 +0100
@@ -42,7 +42,7 @@
     sd = self.sidedata(nodeorrev)
     if getattr(self, 'sidedatanocheck', False):
         return text
-    if self.hassidedata:
+    if self.feature_config.has_side_data:
         return text
     if nodeorrev != nullrev and nodeorrev != self.nullid:
         cat1 = sd.get(sidedata.SD_TEST1)