branching: merge stable into default
author Raphaël Gomès <rgomes@octobus.net>
date Mon, 17 May 2021 15:05:24 +0200
changeset 47233 bcafcd779d2e
parent 47232 33096e77598c
parent 47183 8be95673eb8a (the diff below is against this parent)
child 47234 616b8f412676
branching: merge stable into default
files:
    mercurial/hg.py
    mercurial/localrepo.py
    mercurial/ui.py
    mercurial/util.py
    tests/run-tests.py
--- a/contrib/automation/hgautomation/aws.py	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/automation/hgautomation/aws.py	Mon May 17 15:05:24 2021 +0200
@@ -925,10 +925,15 @@
     requirements3_path = (
         pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.txt'
     )
+    requirements35_path = (
+        pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.5.txt'
+    )
     with requirements2_path.open('r', encoding='utf-8') as fh:
         requirements2 = fh.read()
     with requirements3_path.open('r', encoding='utf-8') as fh:
         requirements3 = fh.read()
+    with requirements35_path.open('r', encoding='utf-8') as fh:
+        requirements35 = fh.read()
 
     # Compute a deterministic fingerprint to determine whether image needs to
     # be regenerated.
@@ -938,6 +943,7 @@
             'bootstrap_script': BOOTSTRAP_DEBIAN,
             'requirements_py2': requirements2,
             'requirements_py3': requirements3,
+            'requirements_py35': requirements35,
         }
     )
 
@@ -979,6 +985,10 @@
                 fh.write(requirements3)
                 fh.chmod(0o0700)
 
+            with sftp.open('%s/requirements-py3.5.txt' % home, 'wb') as fh:
+                fh.write(requirements35)
+                fh.chmod(0o0700)
+
             print('executing bootstrap')
             chan, stdin, stdout = ssh_exec_command(
                 client, '%s/bootstrap' % home
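
The hunks above thread the new Python 3.5 requirements file through the AMI build: its contents join the inputs hashed into the image fingerprint, so changing the 3.5 pins forces image regeneration, and the file is uploaded over SFTP alongside the others before bootstrap runs. A minimal sketch of the fingerprint idea, assuming it is a digest over a sorted serialization of the build inputs (the helper name here is hypothetical; the real logic lives in hgautomation/aws.py):

    import hashlib
    import json

    def image_fingerprint(parts):
        # Serializing with sorted keys makes the digest deterministic,
        # so any change to a bootstrap script or a requirements file
        # yields a new fingerprint and triggers an image rebuild.
        payload = json.dumps(parts, sort_keys=True).encode('utf-8')
        return hashlib.sha256(payload).hexdigest()

    fingerprint = image_fingerprint({
        'bootstrap_script': 'echo bootstrap',
        'requirements_py2': 'pip==20.3.4',
        'requirements_py3': 'astroid==2.5.6',
        'requirements_py35': 'astroid==2.4.2',
    })
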
--- a/contrib/automation/hgautomation/linux.py	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/automation/hgautomation/linux.py	Mon May 17 15:05:24 2021 +0200
@@ -26,11 +26,11 @@
 
 INSTALL_PYTHONS = r'''
 PYENV2_VERSIONS="2.7.17 pypy2.7-7.2.0"
-PYENV3_VERSIONS="3.5.10 3.6.12 3.7.9 3.8.6 3.9.0 pypy3.5-7.0.0 pypy3.6-7.3.0"
+PYENV3_VERSIONS="3.5.10 3.6.13 3.7.10 3.8.10 3.9.5 pypy3.5-7.0.0 pypy3.6-7.3.3 pypy3.7-7.3.3"
 
 git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
 pushd /hgdev/pyenv
-git checkout 8ac91b4fd678a8c04356f5ec85cfcd565c265e9a
+git checkout 328fd42c3a2fbf14ae46dae2021a087fe27ba7e2
 popd
 
 export PYENV_ROOT="/hgdev/pyenv"
@@ -56,7 +56,20 @@
 for v in ${PYENV3_VERSIONS}; do
     pyenv install -v ${v}
     ${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
-    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
+
+    case ${v} in
+        3.5.*)
+            REQUIREMENTS=requirements-py3.5.txt
+            ;;
+        pypy3.5*)
+            REQUIREMENTS=requirements-py3.5.txt
+            ;;
+        *)
+            REQUIREMENTS=requirements-py3.txt
+            ;;
+    esac
+
+    ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/${REQUIREMENTS}
 done
 
 pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
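
The case statement above routes CPython 3.5.x and pypy3.5 to the frozen requirements-py3.5.txt, while every newer interpreter keeps using requirements-py3.txt, presumably because newer pins no longer support Python 3.5. The same selection expressed as a small Python sketch (function name hypothetical, mirroring the shell logic rather than replacing it):

    def requirements_for(version):
        # CPython 3.5.x and pypy3.5 get the frozen 3.5 pin set;
        # everything else uses the current pin set.
        if version.startswith('3.5.') or version.startswith('pypy3.5'):
            return 'requirements-py3.5.txt'
        return 'requirements-py3.txt'

    assert requirements_for('3.5.10') == 'requirements-py3.5.txt'
    assert requirements_for('pypy3.5-7.0.0') == 'requirements-py3.5.txt'
    assert requirements_for('3.9.5') == 'requirements-py3.txt'
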
@@ -64,6 +77,18 @@
     '\r\n', '\n'
 )
 
+INSTALL_PYOXIDIZER = r'''
+PYOXIDIZER_VERSION=0.16.0
+PYOXIDIZER_SHA256=8875471c270312fbb934007fd30f65f1904cc0f5da6188d61c90ed2129b9f9c1
+PYOXIDIZER_URL=https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F${PYOXIDIZER_VERSION}/pyoxidizer-${PYOXIDIZER_VERSION}-linux_x86_64.zip
+
+wget -O pyoxidizer.zip --progress dot:mega ${PYOXIDIZER_URL}
+echo "${PYOXIDIZER_SHA256} pyoxidizer.zip" | sha256sum --check -
+
+unzip pyoxidizer.zip
+chmod +x pyoxidizer
+sudo mv pyoxidizer /usr/local/bin/pyoxidizer
+'''
 
 INSTALL_RUST = r'''
 RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
@@ -72,10 +97,8 @@
 
 chmod +x rustup-init
 sudo -H -u hg -g hg ./rustup-init -y
-sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.46.0
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.41.1 1.52.0
 sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
-
-sudo -H -u hg -g hg /home/hg/.cargo/bin/cargo install --version 0.10.3 pyoxidizer
 '''
 
 
@@ -306,9 +329,9 @@
 sudo chown `whoami` /hgdev
 
 {install_rust}
+{install_pyoxidizer}
 
-cp requirements-py2.txt /hgdev/requirements-py2.txt
-cp requirements-py3.txt /hgdev/requirements-py3.txt
+cp requirements-*.txt /hgdev/
 
 # Disable the pip version check because it uses the network and can
 # be annoying.
@@ -332,6 +355,7 @@
 '''.lstrip()
     .format(
         install_rust=INSTALL_RUST,
+        install_pyoxidizer=INSTALL_PYOXIDIZER,
         install_pythons=INSTALL_PYTHONS,
         bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV,
     )
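
INSTALL_PYOXIDIZER replaces the old `cargo install --version 0.10.3 pyoxidizer` step: the bootstrap now fetches a prebuilt 0.16.0 binary and refuses to proceed unless the archive matches the pinned SHA-256 (`sha256sum --check` exits non-zero on mismatch). The equivalent verification in Python, as a sketch under the assumption that the archive is already on disk:

    import hashlib

    def verify_sha256(path, expected_hex):
        # Stream the file in 1 MiB chunks and compare against the pin,
        # mirroring the `sha256sum --check` gate in INSTALL_PYOXIDIZER.
        h = hashlib.sha256()
        with open(path, 'rb') as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b''):
                h.update(chunk)
        if h.hexdigest() != expected_hex:
            raise ValueError('sha256 mismatch for %s' % path)

    verify_sha256(
        'pyoxidizer.zip',
        '8875471c270312fbb934007fd30f65f1904cc0f5da6188d61c90ed2129b9f9c1',
    )
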
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/automation/linux-requirements-py3.5.txt	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,194 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.5.txt contrib/automation/linux-requirements.txt.in
+#
+astroid==2.4.2 \
+    --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
+    --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386
+    # via pylint
+docutils==0.17.1 \
+    --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
+    --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
+    # via -r contrib/automation/linux-requirements.txt.in
+fuzzywuzzy==0.18.0 \
+    --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
+    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
+    # via -r contrib/automation/linux-requirements.txt.in
+idna==3.1 \
+    --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
+    --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
+    # via yarl
+isort==4.3.21 \
+    --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
+    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
+    # via
+    #   -r contrib/automation/linux-requirements.txt.in
+    #   pylint
+lazy-object-proxy==1.4.3 \
+    --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
+    --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
+    --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
+    --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
+    --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
+    --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
+    --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
+    --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
+    --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
+    --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
+    --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
+    --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
+    --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
+    --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
+    --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
+    --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
+    --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
+    --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
+    --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
+    --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
+    --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0
+    # via astroid
+mccabe==0.6.1 \
+    --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
+    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
+    # via pylint
+multidict==5.0.2 \
+    --hash=sha256:060d68ae3e674c913ec41a464916f12c4d7ff17a3a9ebbf37ba7f2c681c2b33e \
+    --hash=sha256:06f39f0ddc308dab4e5fa282d145f90cd38d7ed75390fc83335636909a9ec191 \
+    --hash=sha256:17847fede1aafdb7e74e01bb34ab47a1a1ea726e8184c623c45d7e428d2d5d34 \
+    --hash=sha256:1cd102057b09223b919f9447c669cf2efabeefb42a42ae6233f25ffd7ee31a79 \
+    --hash=sha256:20cc9b2dd31761990abff7d0e63cd14dbfca4ebb52a77afc917b603473951a38 \
+    --hash=sha256:2576e30bbec004e863d87216bc34abe24962cc2e964613241a1c01c7681092ab \
+    --hash=sha256:2ab9cad4c5ef5c41e1123ed1f89f555aabefb9391d4e01fd6182de970b7267ed \
+    --hash=sha256:359ea00e1b53ceef282232308da9d9a3f60d645868a97f64df19485c7f9ef628 \
+    --hash=sha256:3e61cc244fd30bd9fdfae13bdd0c5ec65da51a86575ff1191255cae677045ffe \
+    --hash=sha256:43c7a87d8c31913311a1ab24b138254a0ee89142983b327a2c2eab7a7d10fea9 \
+    --hash=sha256:4a3f19da871befa53b48dd81ee48542f519beffa13090dc135fffc18d8fe36db \
+    --hash=sha256:4df708ef412fd9b59b7e6c77857e64c1f6b4c0116b751cb399384ec9a28baa66 \
+    --hash=sha256:59182e975b8c197d0146a003d0f0d5dc5487ce4899502061d8df585b0f51fba2 \
+    --hash=sha256:6128d2c0956fd60e39ec7d1c8f79426f0c915d36458df59ddd1f0cff0340305f \
+    --hash=sha256:6168839491a533fa75f3f5d48acbb829475e6c7d9fa5c6e245153b5f79b986a3 \
+    --hash=sha256:62abab8088704121297d39c8f47156cb8fab1da731f513e59ba73946b22cf3d0 \
+    --hash=sha256:653b2bbb0bbf282c37279dd04f429947ac92713049e1efc615f68d4e64b1dbc2 \
+    --hash=sha256:6566749cd78cb37cbf8e8171b5cd2cbfc03c99f0891de12255cf17a11c07b1a3 \
+    --hash=sha256:76cbdb22f48de64811f9ce1dd4dee09665f84f32d6a26de249a50c1e90e244e0 \
+    --hash=sha256:8efcf070d60fd497db771429b1c769a3783e3a0dd96c78c027e676990176adc5 \
+    --hash=sha256:8fa4549f341a057feec4c3139056ba73e17ed03a506469f447797a51f85081b5 \
+    --hash=sha256:9380b3f2b00b23a4106ba9dd022df3e6e2e84e1788acdbdd27603b621b3288df \
+    --hash=sha256:9ed9b280f7778ad6f71826b38a73c2fdca4077817c64bc1102fdada58e75c03c \
+    --hash=sha256:a7b8b5bd16376c8ac2977748bd978a200326af5145d8d0e7f799e2b355d425b6 \
+    --hash=sha256:af271c2540d1cd2a137bef8d95a8052230aa1cda26dd3b2c73d858d89993d518 \
+    --hash=sha256:b561e76c9e21402d9a446cdae13398f9942388b9bff529f32dfa46220af54d00 \
+    --hash=sha256:b82400ef848bbac6b9035a105ac6acaa1fb3eea0d164e35bbb21619b88e49fed \
+    --hash=sha256:b98af08d7bb37d3456a22f689819ea793e8d6961b9629322d7728c4039071641 \
+    --hash=sha256:c58e53e1c73109fdf4b759db9f2939325f510a8a5215135330fe6755921e4886 \
+    --hash=sha256:cbabfc12b401d074298bfda099c58dfa5348415ae2e4ec841290627cb7cb6b2e \
+    --hash=sha256:d4a6fb98e9e9be3f7d70fd3e852369c00a027bd5ed0f3e8ade3821bcad257408 \
+    --hash=sha256:d99da85d6890267292065e654a329e1d2f483a5d2485e347383800e616a8c0b1 \
+    --hash=sha256:e58db0e0d60029915f7fc95a8683fa815e204f2e1990f1fb46a7778d57ca8c35 \
+    --hash=sha256:e5bf89fe57f702a046c7ec718fe330ed50efd4bcf74722940db2eb0919cddb1c \
+    --hash=sha256:f612e8ef8408391a4a3366e3508bab8ef97b063b4918a317cb6e6de4415f01af \
+    --hash=sha256:f65a2442c113afde52fb09f9a6276bbc31da71add99dc76c3adf6083234e07c6 \
+    --hash=sha256:fa0503947a99a1be94f799fac89d67a5e20c333e78ddae16e8534b151cdc588a
+    # via yarl
+pyflakes==2.3.1 \
+    --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
+    --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
+    # via -r contrib/automation/linux-requirements.txt.in
+pygments==2.9.0 \
+    --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
+    --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
+    # via -r contrib/automation/linux-requirements.txt.in
+pylint==2.6.2 \
+    --hash=sha256:718b74786ea7ed07aa0c58bf572154d4679f960d26e9641cc1de204a30b87fc9 \
+    --hash=sha256:e71c2e9614a4f06e36498f310027942b0f4f2fde20aebb01655b31edc63b9eaf
+    # via -r contrib/automation/linux-requirements.txt.in
+python-levenshtein==0.12.2 \
+    --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
+    # via -r contrib/automation/linux-requirements.txt.in
+pyyaml==5.3.1 \
+    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
+    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
+    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
+    --hash=sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e \
+    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
+    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
+    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
+    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
+    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
+    --hash=sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a \
+    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
+    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
+    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a
+    # via vcrpy
+six==1.16.0 \
+    --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+    --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+    # via
+    #   astroid
+    #   vcrpy
+toml==0.10.2 \
+    --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+    --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+    # via pylint
+typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+    --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
+    --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
+    --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
+    --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
+    --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
+    --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
+    --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
+    --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
+    --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
+    --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
+    --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
+    --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
+    --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
+    --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
+    --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
+    --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
+    --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
+    --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
+    --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
+    --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
+    --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
+    --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
+    --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
+    --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
+    --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
+    --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
+    --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
+    --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
+    --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
+    --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
+    # via
+    #   -r contrib/automation/linux-requirements.txt.in
+    #   astroid
+vcrpy==4.1.1 \
+    --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
+    --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
+    # via -r contrib/automation/linux-requirements.txt.in
+wrapt==1.12.1 \
+    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
+    # via
+    #   astroid
+    #   vcrpy
+yarl==1.3.0 \
+    --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
+    --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
+    --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
+    --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
+    --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
+    --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
+    --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
+    --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
+    --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
+    --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
+    --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1
+    # via vcrpy
+
+# WARNING: The following packages were not pinned, but pip requires them to be
+# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+# setuptools
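
Every entry in this new file carries `--hash=sha256:` pins, which puts pip into hash-checking mode: installation fails if a downloaded artifact's digest does not match one of the listed hashes. A simplified sketch of reading the `name==version` pins out of such a file (real requirements parsing handles more syntax, e.g. environment markers):

    import re

    def parse_pins(text):
        # Match the package==version prefix of each requirement line;
        # continuation lines holding --hash options are skipped.
        pins = {}
        for line in text.splitlines():
            m = re.match(r'^([A-Za-z0-9_.-]+)==([^ ;\\]+)', line)
            if m:
                pins[m.group(1)] = m.group(2)
        return pins

    sample = 'astroid==2.4.2 \\\n    --hash=sha256:2f40...\ndocutils==0.17.1'
    assert parse_pins(sample) == {'astroid': '2.4.2', 'docutils': '0.17.1'}
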
--- a/contrib/automation/linux-requirements-py3.txt	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/automation/linux-requirements-py3.txt	Mon May 17 15:05:24 2021 +0200
@@ -6,208 +6,299 @@
 #
 appdirs==1.4.4 \
     --hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \
-    --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 \
+    --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128
     # via black
-astroid==2.4.2 \
-    --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
-    --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386 \
+astroid==2.5.6 \
+    --hash=sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e \
+    --hash=sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975
     # via pylint
-attrs==20.2.0 \
-    --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \
-    --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc \
+attrs==21.1.0 \
+    --hash=sha256:3901be1cb7c2a780f14668691474d9252c070a756be0a9ead98cfeabfa11aeb8 \
+    --hash=sha256:8ee1e5f5a1afc5b19bdfae4fdf0c35ed324074bdce3500c939842c8f818645d9
     # via black
 black==19.10b0 ; python_version >= "3.6" and platform_python_implementation != "PyPy" \
     --hash=sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b \
-    --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539 \
+    --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539
     # via -r contrib/automation/linux-requirements.txt.in
 click==7.1.2 \
     --hash=sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a \
-    --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc \
+    --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc
     # via black
-docutils==0.16 \
-    --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
-    --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+docutils==0.17.1 \
+    --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
+    --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
     # via -r contrib/automation/linux-requirements.txt.in
 fuzzywuzzy==0.18.0 \
     --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
-    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993 \
+    --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
     # via -r contrib/automation/linux-requirements.txt.in
-idna==2.10 \
-    --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
-    --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 \
+idna==3.1 \
+    --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
+    --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
     # via yarl
 isort==4.3.21 \
     --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
-    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
-    # via -r contrib/automation/linux-requirements.txt.in, pylint
-lazy-object-proxy==1.4.3 \
-    --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
-    --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
-    --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
-    --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
-    --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
-    --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
-    --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
-    --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
-    --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
-    --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
-    --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
-    --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
-    --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
-    --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
-    --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
-    --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
-    --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
-    --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
-    --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
-    --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
-    --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0 \
+    --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
+    # via
+    #   -r contrib/automation/linux-requirements.txt.in
+    #   pylint
+lazy-object-proxy==1.6.0 \
+    --hash=sha256:17e0967ba374fc24141738c69736da90e94419338fd4c7c7bef01ee26b339653 \
+    --hash=sha256:1fee665d2638491f4d6e55bd483e15ef21f6c8c2095f235fef72601021e64f61 \
+    --hash=sha256:22ddd618cefe54305df49e4c069fa65715be4ad0e78e8d252a33debf00f6ede2 \
+    --hash=sha256:24a5045889cc2729033b3e604d496c2b6f588c754f7a62027ad4437a7ecc4837 \
+    --hash=sha256:410283732af311b51b837894fa2f24f2c0039aa7f220135192b38fcc42bd43d3 \
+    --hash=sha256:4732c765372bd78a2d6b2150a6e99d00a78ec963375f236979c0626b97ed8e43 \
+    --hash=sha256:489000d368377571c6f982fba6497f2aa13c6d1facc40660963da62f5c379726 \
+    --hash=sha256:4f60460e9f1eb632584c9685bccea152f4ac2130e299784dbaf9fae9f49891b3 \
+    --hash=sha256:5743a5ab42ae40caa8421b320ebf3a998f89c85cdc8376d6b2e00bd12bd1b587 \
+    --hash=sha256:85fb7608121fd5621cc4377a8961d0b32ccf84a7285b4f1d21988b2eae2868e8 \
+    --hash=sha256:9698110e36e2df951c7c36b6729e96429c9c32b3331989ef19976592c5f3c77a \
+    --hash=sha256:9d397bf41caad3f489e10774667310d73cb9c4258e9aed94b9ec734b34b495fd \
+    --hash=sha256:b579f8acbf2bdd9ea200b1d5dea36abd93cabf56cf626ab9c744a432e15c815f \
+    --hash=sha256:b865b01a2e7f96db0c5d12cfea590f98d8c5ba64ad222300d93ce6ff9138bcad \
+    --hash=sha256:bf34e368e8dd976423396555078def5cfc3039ebc6fc06d1ae2c5a65eebbcde4 \
+    --hash=sha256:c6938967f8528b3668622a9ed3b31d145fab161a32f5891ea7b84f6b790be05b \
+    --hash=sha256:d1c2676e3d840852a2de7c7d5d76407c772927addff8d742b9808fe0afccebdf \
+    --hash=sha256:d7124f52f3bd259f510651450e18e0fd081ed82f3c08541dffc7b94b883aa981 \
+    --hash=sha256:d900d949b707778696fdf01036f58c9876a0d8bfe116e8d220cfd4b15f14e741 \
+    --hash=sha256:ebfd274dcd5133e0afae738e6d9da4323c3eb021b3e13052d8cbd0e457b1256e \
+    --hash=sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93 \
+    --hash=sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b
     # via astroid
 mccabe==0.6.1 \
     --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
-    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
+    --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
     # via pylint
-multidict==4.7.6 \
-    --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \
-    --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \
-    --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \
-    --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \
-    --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \
-    --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \
-    --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \
-    --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \
-    --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \
-    --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \
-    --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \
-    --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \
-    --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \
-    --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \
-    --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \
-    --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \
-    --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d \
+multidict==5.1.0 \
+    --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
+    --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
+    --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
+    --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
+    --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
+    --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
+    --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
+    --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \
+    --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
+    --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
+    --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
+    --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
+    --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
+    --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
+    --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
+    --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
+    --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
+    --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
+    --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
+    --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
+    --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
+    --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
+    --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
+    --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
+    --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
+    --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
+    --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
+    --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
+    --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
+    --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
+    --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
+    --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
+    --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
+    --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
+    --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
+    --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
+    --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80
     # via yarl
-pathspec==0.8.0 \
-    --hash=sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0 \
-    --hash=sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061 \
+pathspec==0.8.1 \
+    --hash=sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd \
+    --hash=sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d
     # via black
-pyflakes==2.2.0 \
-    --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \
-    --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8 \
+pyflakes==2.3.1 \
+    --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
+    --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
     # via -r contrib/automation/linux-requirements.txt.in
-pygments==2.7.1 \
-    --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
-    --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
+pygments==2.9.0 \
+    --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
+    --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
     # via -r contrib/automation/linux-requirements.txt.in
-pylint==2.6.0 \
-    --hash=sha256:bb4a908c9dadbc3aac18860550e870f58e1a02c9f2c204fdf5693d73be061210 \
-    --hash=sha256:bfe68f020f8a0fece830a22dd4d5dddb4ecc6137db04face4c3420a46a52239f \
+pylint==2.8.2 \
+    --hash=sha256:586d8fa9b1891f4b725f587ef267abe2a1bad89d6b184520c7f07a253dd6e217 \
+    --hash=sha256:f7e2072654a6b6afdf5e2fb38147d3e2d2d43c89f648637baab63e026481279b
+    # via -r contrib/automation/linux-requirements.txt.in
+python-levenshtein==0.12.2 \
+    --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
     # via -r contrib/automation/linux-requirements.txt.in
-python-levenshtein==0.12.0 \
-    --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 \
-    # via -r contrib/automation/linux-requirements.txt.in
-pyyaml==5.3.1 \
-    --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
-    --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
-    --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
-    --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
-    --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
-    --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
-    --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
-    --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
-    --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
-    --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
-    --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a \
+pyyaml==5.4.1 \
+    --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
+    --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
+    --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
+    --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
+    --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
+    --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
+    --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
+    --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
+    --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
+    --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
+    --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
+    --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
+    --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
+    --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
+    --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
+    --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
+    --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
+    --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
+    --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
+    --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
+    --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
+    --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
+    --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
+    --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
+    --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
+    --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
+    --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
+    --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
+    --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0
     # via vcrpy
-regex==2020.9.27 \
-    --hash=sha256:088afc8c63e7bd187a3c70a94b9e50ab3f17e1d3f52a32750b5b77dbe99ef5ef \
-    --hash=sha256:1fe0a41437bbd06063aa184c34804efa886bcc128222e9916310c92cd54c3b4c \
-    --hash=sha256:3d20024a70b97b4f9546696cbf2fd30bae5f42229fbddf8661261b1eaff0deb7 \
-    --hash=sha256:41bb65f54bba392643557e617316d0d899ed5b4946dccee1cb6696152b29844b \
-    --hash=sha256:4318d56bccfe7d43e5addb272406ade7a2274da4b70eb15922a071c58ab0108c \
-    --hash=sha256:4707f3695b34335afdfb09be3802c87fa0bc27030471dbc082f815f23688bc63 \
-    --hash=sha256:49f23ebd5ac073765ecbcf046edc10d63dcab2f4ae2bce160982cb30df0c0302 \
-    --hash=sha256:5533a959a1748a5c042a6da71fe9267a908e21eded7a4f373efd23a2cbdb0ecc \
-    --hash=sha256:5d892a4f1c999834eaa3c32bc9e8b976c5825116cde553928c4c8e7e48ebda67 \
-    --hash=sha256:5f18875ac23d9aa2f060838e8b79093e8bb2313dbaaa9f54c6d8e52a5df097be \
-    --hash=sha256:60b0e9e6dc45683e569ec37c55ac20c582973841927a85f2d8a7d20ee80216ab \
-    --hash=sha256:816064fc915796ea1f26966163f6845de5af78923dfcecf6551e095f00983650 \
-    --hash=sha256:84cada8effefe9a9f53f9b0d2ba9b7b6f5edf8d2155f9fdbe34616e06ececf81 \
-    --hash=sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19 \
-    --hash=sha256:8d69cef61fa50c8133382e61fd97439de1ae623fe943578e477e76a9d9471637 \
-    --hash=sha256:9a02d0ae31d35e1ec12a4ea4d4cca990800f66a917d0fb997b20fbc13f5321fc \
-    --hash=sha256:9bc13e0d20b97ffb07821aa3e113f9998e84994fe4d159ffa3d3a9d1b805043b \
-    --hash=sha256:a6f32aea4260dfe0e55dc9733ea162ea38f0ea86aa7d0f77b15beac5bf7b369d \
-    --hash=sha256:ae91972f8ac958039920ef6e8769277c084971a142ce2b660691793ae44aae6b \
-    --hash=sha256:c570f6fa14b9c4c8a4924aaad354652366577b4f98213cf76305067144f7b100 \
-    --hash=sha256:c9443124c67b1515e4fe0bb0aa18df640965e1030f468a2a5dc2589b26d130ad \
-    --hash=sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3 \
-    --hash=sha256:eaf548d117b6737df379fdd53bdde4f08870e66d7ea653e230477f071f861121 \
-    --hash=sha256:ebbe29186a3d9b0c591e71b7393f1ae08c83cb2d8e517d2a822b8f7ec99dfd8b \
-    --hash=sha256:eda4771e0ace7f67f58bc5b560e27fb20f32a148cbc993b0c3835970935c2707 \
-    --hash=sha256:f1b3afc574a3db3b25c89161059d857bd4909a1269b0b3cb3c904677c8c4a3f7 \
-    --hash=sha256:f2388013e68e750eaa16ccbea62d4130180c26abb1d8e5d584b9baf69672b30f \
+regex==2021.4.4 \
+    --hash=sha256:01afaf2ec48e196ba91b37451aa353cb7eda77efe518e481707e0515025f0cd5 \
+    --hash=sha256:11d773d75fa650cd36f68d7ca936e3c7afaae41b863b8c387a22aaa78d3c5c79 \
+    --hash=sha256:18c071c3eb09c30a264879f0d310d37fe5d3a3111662438889ae2eb6fc570c31 \
+    --hash=sha256:1e1c20e29358165242928c2de1482fb2cf4ea54a6a6dea2bd7a0e0d8ee321500 \
+    --hash=sha256:281d2fd05555079448537fe108d79eb031b403dac622621c78944c235f3fcf11 \
+    --hash=sha256:314d66636c494ed9c148a42731b3834496cc9a2c4251b1661e40936814542b14 \
+    --hash=sha256:32e65442138b7b76dd8173ffa2cf67356b7bc1768851dded39a7a13bf9223da3 \
+    --hash=sha256:339456e7d8c06dd36a22e451d58ef72cef293112b559010db3d054d5560ef439 \
+    --hash=sha256:3916d08be28a1149fb97f7728fca1f7c15d309a9f9682d89d79db75d5e52091c \
+    --hash=sha256:3a9cd17e6e5c7eb328517969e0cb0c3d31fd329298dd0c04af99ebf42e904f82 \
+    --hash=sha256:47bf5bf60cf04d72bf6055ae5927a0bd9016096bf3d742fa50d9bf9f45aa0711 \
+    --hash=sha256:4c46e22a0933dd783467cf32b3516299fb98cfebd895817d685130cc50cd1093 \
+    --hash=sha256:4c557a7b470908b1712fe27fb1ef20772b78079808c87d20a90d051660b1d69a \
+    --hash=sha256:52ba3d3f9b942c49d7e4bc105bb28551c44065f139a65062ab7912bef10c9afb \
+    --hash=sha256:563085e55b0d4fb8f746f6a335893bda5c2cef43b2f0258fe1020ab1dd874df8 \
+    --hash=sha256:598585c9f0af8374c28edd609eb291b5726d7cbce16be6a8b95aa074d252ee17 \
+    --hash=sha256:619d71c59a78b84d7f18891fe914446d07edd48dc8328c8e149cbe0929b4e000 \
+    --hash=sha256:67bdb9702427ceddc6ef3dc382455e90f785af4c13d495f9626861763ee13f9d \
+    --hash=sha256:6d1b01031dedf2503631d0903cb563743f397ccaf6607a5e3b19a3d76fc10480 \
+    --hash=sha256:741a9647fcf2e45f3a1cf0e24f5e17febf3efe8d4ba1281dcc3aa0459ef424dc \
+    --hash=sha256:7c2a1af393fcc09e898beba5dd59196edaa3116191cc7257f9224beaed3e1aa0 \
+    --hash=sha256:7d9884d86dd4dd489e981d94a65cd30d6f07203d90e98f6f657f05170f6324c9 \
+    --hash=sha256:90f11ff637fe8798933fb29f5ae1148c978cccb0452005bf4c69e13db951e765 \
+    --hash=sha256:919859aa909429fb5aa9cf8807f6045592c85ef56fdd30a9a3747e513db2536e \
+    --hash=sha256:96fcd1888ab4d03adfc9303a7b3c0bd78c5412b2bfbe76db5b56d9eae004907a \
+    --hash=sha256:97f29f57d5b84e73fbaf99ab3e26134e6687348e95ef6b48cfd2c06807005a07 \
+    --hash=sha256:980d7be47c84979d9136328d882f67ec5e50008681d94ecc8afa8a65ed1f4a6f \
+    --hash=sha256:a91aa8619b23b79bcbeb37abe286f2f408d2f2d6f29a17237afda55bb54e7aac \
+    --hash=sha256:ade17eb5d643b7fead300a1641e9f45401c98eee23763e9ed66a43f92f20b4a7 \
+    --hash=sha256:b9c3db21af35e3b3c05764461b262d6f05bbca08a71a7849fd79d47ba7bc33ed \
+    --hash=sha256:bd28bc2e3a772acbb07787c6308e00d9626ff89e3bfcdebe87fa5afbfdedf968 \
+    --hash=sha256:bf5824bfac591ddb2c1f0a5f4ab72da28994548c708d2191e3b87dd207eb3ad7 \
+    --hash=sha256:c0502c0fadef0d23b128605d69b58edb2c681c25d44574fc673b0e52dce71ee2 \
+    --hash=sha256:c38c71df845e2aabb7fb0b920d11a1b5ac8526005e533a8920aea97efb8ec6a4 \
+    --hash=sha256:ce15b6d103daff8e9fee13cf7f0add05245a05d866e73926c358e871221eae87 \
+    --hash=sha256:d3029c340cfbb3ac0a71798100ccc13b97dddf373a4ae56b6a72cf70dfd53bc8 \
+    --hash=sha256:e512d8ef5ad7b898cdb2d8ee1cb09a8339e4f8be706d27eaa180c2f177248a10 \
+    --hash=sha256:e8e5b509d5c2ff12f8418006d5a90e9436766133b564db0abaec92fd27fcee29 \
+    --hash=sha256:ee54ff27bf0afaf4c3b3a62bcd016c12c3fdb4ec4f413391a90bd38bc3624605 \
+    --hash=sha256:fa4537fb4a98fe8fde99626e4681cc644bdcf2a795038533f9f711513a862ae6 \
+    --hash=sha256:fd45ff9293d9274c5008a2054ecef86a9bfe819a67c7be1afb65e69b405b3042
     # via black
-six==1.15.0 \
-    --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
-    --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced \
-    # via astroid, vcrpy
-toml==0.10.1 \
-    --hash=sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f \
-    --hash=sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88 \
-    # via black, pylint
-typed-ast==1.4.1 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
-    --hash=sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355 \
-    --hash=sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919 \
-    --hash=sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa \
-    --hash=sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652 \
-    --hash=sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75 \
-    --hash=sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01 \
-    --hash=sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d \
-    --hash=sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1 \
-    --hash=sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907 \
-    --hash=sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c \
-    --hash=sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3 \
-    --hash=sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b \
-    --hash=sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614 \
-    --hash=sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb \
-    --hash=sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b \
-    --hash=sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41 \
-    --hash=sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6 \
-    --hash=sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34 \
-    --hash=sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe \
-    --hash=sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4 \
-    --hash=sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7 \
-    # via -r contrib/automation/linux-requirements.txt.in, astroid, black
-typing-extensions==3.7.4.3 \
-    --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \
-    --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \
-    --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f \
+six==1.16.0 \
+    --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+    --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+    # via vcrpy
+toml==0.10.2 \
+    --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+    --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+    # via
+    #   black
+    #   pylint
+typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+    --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
+    --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
+    --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
+    --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
+    --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
+    --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
+    --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
+    --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
+    --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
+    --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
+    --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
+    --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
+    --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
+    --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
+    --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
+    --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
+    --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
+    --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
+    --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
+    --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
+    --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
+    --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
+    --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
+    --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
+    --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
+    --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
+    --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
+    --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
+    --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
+    --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
+    # via
+    #   -r contrib/automation/linux-requirements.txt.in
+    #   astroid
+    #   black
+typing-extensions==3.10.0.0 \
+    --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
+    --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
+    --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84
     # via yarl
-vcrpy==4.1.0 \
-    --hash=sha256:4138e79eb35981ad391406cbb7227bce7eba8bad788dcf1a89c2e4a8b740debe \
-    --hash=sha256:d833248442bbc560599add895c9ab0ef518676579e8dc72d8b0933bdb3880253 \
+vcrpy==4.1.1 \
+    --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
+    --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
     # via -r contrib/automation/linux-requirements.txt.in
 wrapt==1.12.1 \
-    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
-    # via astroid, vcrpy
-yarl==1.6.0 \
-    --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \
-    --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \
-    --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \
-    --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \
-    --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \
-    --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \
-    --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \
-    --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \
-    --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \
-    --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \
-    --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \
-    --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \
-    --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \
-    --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \
-    --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \
-    --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \
-    --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a \
+    --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
+    # via
+    #   astroid
+    #   vcrpy
+yarl==1.6.3 \
+    --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
+    --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
+    --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
+    --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
+    --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
+    --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
+    --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
+    --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
+    --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
+    --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
+    --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
+    --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
+    --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
+    --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
+    --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
+    --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
+    --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
+    --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
+    --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
+    --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \
+    --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
+    --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
+    --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
+    --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
+    --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
+    --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
+    --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
+    --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
+    --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
+    --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
+    --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
+    --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
+    --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
+    --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
+    --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
+    --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
+    --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71
     # via vcrpy
 
 # WARNING: The following packages were not pinned, but pip requires them to be
--- a/contrib/chg/chg.c	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/chg/chg.c	Mon May 17 15:05:24 2021 +0200
@@ -240,13 +240,8 @@
 	const char *hgcmd = gethgcmd();
 
 	const char *baseargv[] = {
-	    hgcmd,
-	    "serve",
-	    "--cmdserver",
-	    "chgunix",
-	    "--address",
-	    opts->initsockname,
-	    "--daemon-postexec",
+	    hgcmd,     "serve",     "--no-profile",     "--cmdserver",
+	    "chgunix", "--address", opts->initsockname, "--daemon-postexec",
 	    "chdir:/",
 	};
 	size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
--- a/contrib/dumprevlog	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/dumprevlog	Mon May 17 15:05:24 2021 +0200
@@ -13,6 +13,10 @@
 )
 from mercurial.utils import procutil
 
+from mercurial.revlogutils import (
+    constants as revlog_constants,
+)
+
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
 
@@ -32,7 +36,16 @@
 
 
 for f in sys.argv[1:]:
-    r = revlog.revlog(binopen, encoding.strtolocal(f))
+    localf = encoding.strtolocal(f)
+    if not localf.endswith(b'.i'):
+        print("file:", f, file=sys.stderr)
+        print("  invalid filename", file=sys.stderr)
+
+    r = revlog.revlog(
+        binopen,
+        target=(revlog_constants.KIND_OTHER, b'dump-revlog'),
+        radix=localf[:-2],
+    )
     print("file:", f)
     for i in r:
         n = r.node(i)
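
The updated dumprevlog passes the new `target`/`radix` arguments expected by revlog's revised constructor: the radix is the index filename with its two-character `.i` suffix stripped (`localf[:-2]`). Note the script only prints a warning for non-`.i` arguments and still proceeds; a stricter sketch of the same derivation (hypothetical helper, raising instead of merely warning):

    def revlog_radix(index_path):
        # A revlog radix is the path shared by the .i/.d files;
        # derive it by stripping the two-character '.i' suffix.
        if not index_path.endswith(b'.i'):
            raise ValueError('expected a revlog index (.i) file')
        return index_path[:-2]

    assert revlog_radix(b'00changelog.i') == b'00changelog'
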
--- a/contrib/install-windows-dependencies.ps1	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/install-windows-dependencies.ps1	Mon May 17 15:05:24 2021 +0200
@@ -32,15 +32,15 @@
 $PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9-amd64.exe"
 $PYTHON37_x64_SHA256 = "e69ed52afb5a722e5c56f6c21d594e85c17cb29f12f18bb69751cf1714e0f987"
 
-$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6.exe"
-$PYTHON38_x86_SHA256 = "287d5df01ff22ff09e6a487ae018603ee19eade71d462ec703850c96f1d5e8a0"
-$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6-amd64.exe"
-$PYTHON38_x64_SHA256 = "328a257f189cb500606bb26ab0fbdd298ed0e05d8c36540a322a1744f489a0a0"
+$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10.exe"
+$PYTHON38_x86_SHA256 = "ad07633a1f0cd795f3bf9da33729f662281df196b4567fa795829f3bb38a30ac"
+$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe"
+$PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a"
 
-$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0.exe"
-$PYTHON39_x86_SHA256 = "a4c65917f4225d1543959342f0615c813a4e9e7ff1137c4394ff6a5290ac1913"
-$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0-amd64.exe"
-$PYTHON39_x64_SHA256 = "fd2e2c6612d43bb6b213b72fc53f07d73d99059fa72c96e44bde12e7815073ae"
+$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5.exe"
+$PYTHON39_x86_SHA256 = "505129081a839b699a6ab9064b441ad922ef03767b5dd4241fd0c2166baf64de"
+$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5-amd64.exe"
+$PYTHON39_x64_SHA256 = "84d5243088ba00c11e51905c704dbe041040dfff044f4e1ce5476844ee2e6eac"
 
 # PIP 19.2.3.
 $PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py"
@@ -62,6 +62,9 @@
 $RUSTUP_INIT_URL = "https://static.rust-lang.org/rustup/archive/1.21.1/x86_64-pc-windows-gnu/rustup-init.exe"
 $RUSTUP_INIT_SHA256 = "d17df34ba974b9b19cf5c75883a95475aa22ddc364591d75d174090d55711c72"
 
+$PYOXIDIZER_URL = "https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F0.16.0/PyOxidizer-0.16.0-x64.msi"
+$PYOXIDIZER_SHA256 = "2a9c58add9161c272c418d5e6dec13fbe648f624b5d26770190357e4d664f24e"
+
 # Writing progress slows down downloads substantially. So disable it.
 $progressPreference = 'silentlyContinue'
 
@@ -121,11 +124,8 @@
 
     Invoke-Process "${prefix}\assets\rustup-init.exe" "-y --default-host x86_64-pc-windows-msvc"
     Invoke-Process "${prefix}\cargo\bin\rustup.exe" "target add i686-pc-windows-msvc"
-    Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.46.0"
+    Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.52.0"
     Invoke-Process "${prefix}\cargo\bin\rustup.exe" "component add clippy"
-
-    # Install PyOxidizer for packaging.
-    Invoke-Process "${prefix}\cargo\bin\cargo.exe" "install --version 0.10.3 pyoxidizer"
 }
 
 function Install-Dependencies($prefix) {
@@ -151,6 +151,7 @@
     Secure-Download $MINGW_BIN_URL ${prefix}\assets\mingw-get-bin.zip $MINGW_BIN_SHA256
     Secure-Download $MERCURIAL_WHEEL_URL ${prefix}\assets\${MERCURIAL_WHEEL_FILENAME} $MERCURIAL_WHEEL_SHA256
     Secure-Download $RUSTUP_INIT_URL ${prefix}\assets\rustup-init.exe $RUSTUP_INIT_SHA256
+    Secure-Download $PYOXIDIZER_URL ${prefix}\assets\PyOxidizer.msi $PYOXIDIZER_SHA256
 
     Write-Output "installing Python 2.7 32-bit"
     Invoke-Process msiexec.exe "/i ${prefix}\assets\python27-x86.msi /l* ${prefix}\assets\python27-x86.log /q TARGETDIR=${prefix}\python27-x86 ALLUSERS="
@@ -172,6 +173,9 @@
     Write-Output "installing Visual Studio 2017 Build Tools and SDKs"
     Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
 
+    Write-Output "installing PyOxidizer"
+    Invoke-Process msiexec.exe "/i ${prefix}\assets\PyOxidizer.msi /l* ${prefix}\assets\PyOxidizer.log /quiet"
+
     Install-Rust ${prefix}
 
     Write-Output "installing Visual C++ 9.0 for Python 2.7"
--- a/contrib/packaging/hgpackaging/inno.py	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/packaging/hgpackaging/inno.py	Mon May 17 15:05:24 2021 +0200
@@ -18,7 +18,7 @@
     build_py2exe,
     stage_install,
 )
-from .pyoxidizer import run_pyoxidizer
+from .pyoxidizer import create_pyoxidizer_install_layout
 from .util import (
     find_legacy_vc_runtime_files,
     normalize_windows_version,
@@ -136,7 +136,9 @@
     staging_dir = inno_build_dir / "stage"
 
     inno_build_dir.mkdir(parents=True, exist_ok=True)
-    run_pyoxidizer(source_dir, inno_build_dir, staging_dir, target_triple)
+    create_pyoxidizer_install_layout(
+        source_dir, inno_build_dir, staging_dir, target_triple
+    )
 
     process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)
 
--- a/contrib/packaging/hgpackaging/pyoxidizer.py	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/packaging/hgpackaging/pyoxidizer.py	Mon May 17 15:05:24 2021 +0200
@@ -12,6 +12,7 @@
 import shutil
 import subprocess
 import sys
+import typing
 
 from .downloads import download_entry
 from .util import (
@@ -53,17 +54,36 @@
 ]
 
 
+def build_docs_html(source_dir: pathlib.Path):
+    """Ensures HTML documentation is built.
+
+    This will fail if docutils isn't available.
+
+    (The HTML docs aren't built as part of `pip install`, so we need to build them
+    out of band.)
+    """
+    subprocess.run(
+        [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"],
+        cwd=str(source_dir),
+        check=True,
+    )
+
+
 def run_pyoxidizer(
     source_dir: pathlib.Path,
     build_dir: pathlib.Path,
-    out_dir: pathlib.Path,
     target_triple: str,
-):
-    """Build Mercurial with PyOxidizer and copy additional files into place.
+    build_vars: typing.Optional[typing.Dict[str, str]] = None,
+    target: typing.Optional[str] = None,
+) -> pathlib.Path:
+    """Run `pyoxidizer` in an environment with access to build dependencies.
 
-    After successful completion, ``out_dir`` contains files constituting a
-    Mercurial install.
+    Returns the output directory that pyoxidizer would have used for build
+    artifacts. Actual build artifacts are likely in a sub-directory with the
+    name of the pyoxidizer build target that was built.
     """
+    build_vars = build_vars or {}
+
     # We need to make gettext binaries available for compiling i18n files.
     gettext_pkg, gettext_entry = download_entry('gettext', build_dir)
     gettext_dep_pkg = download_entry('gettext-dep', build_dir)[0]
@@ -91,8 +111,31 @@
         target_triple,
     ]
 
+    for k, v in sorted(build_vars.items()):
+        args.extend(["--var", k, v])
+
+    if target:
+        args.append(target)
+
     subprocess.run(args, env=env, check=True)
 
+    return source_dir / "build" / "pyoxidizer" / target_triple / "release"
+
+
+def create_pyoxidizer_install_layout(
+    source_dir: pathlib.Path,
+    build_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    target_triple: str,
+):
+    """Build Mercurial with PyOxidizer and copy additional files into place.
+
+    After successful completion, ``out_dir`` contains files constituting a
+    Mercurial install.
+    """
+
+    run_pyoxidizer(source_dir, build_dir, target_triple)
+
     if "windows" in target_triple:
         target = "app_windows"
     else:
@@ -113,14 +156,7 @@
     # is taught to use the importlib APIs for reading resources.
     process_install_rules(STAGING_RULES_APP, build_dir, out_dir)
 
-    # We also need to run setup.py build_doc to produce html files,
-    # as they aren't built as part of ``pip install``.
-    # This will fail if docutils isn't installed.
-    subprocess.run(
-        [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"],
-        cwd=str(source_dir),
-        check=True,
-    )
+    build_docs_html(source_dir)
 
     if "windows" in target_triple:
         process_install_rules(STAGING_RULES_WINDOWS, source_dir, out_dir)
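
After this split, ``run_pyoxidizer()`` only drives the ``pyoxidizer build``
invocation (forwarding ``build_vars`` as repeated ``--var KEY VALUE``
arguments and returning the build output directory), while the old
copy-into-place behaviour lives in ``create_pyoxidizer_install_layout()``.
Roughly how the two compose, as a sketch with made-up paths::

  import pathlib

  from hgpackaging.pyoxidizer import (
      create_pyoxidizer_install_layout,
      run_pyoxidizer,
  )

  source_dir = pathlib.Path('C:/src/mercurial')  # hypothetical checkout
  build_dir = source_dir / 'build' / 'example'
  triple = 'x86_64-pc-windows-msvc'

  # bare build; returns .../build/pyoxidizer/<triple>/release
  out_dir = run_pyoxidizer(
      source_dir,
      build_dir,
      triple,
      build_vars={'VERSION': '5.9'},  # becomes: --var VERSION 5.9
      target='msi',
  )

  # or: build, then stage a traditional file layout under build_dir / "stage"
  create_pyoxidizer_install_layout(
      source_dir, build_dir, build_dir / 'stage', triple
  )
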
--- a/contrib/packaging/hgpackaging/wix.py	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/packaging/hgpackaging/wix.py	Mon May 17 15:05:24 2021 +0200
@@ -22,7 +22,11 @@
     build_py2exe,
     stage_install,
 )
-from .pyoxidizer import run_pyoxidizer
+from .pyoxidizer import (
+    build_docs_html,
+    create_pyoxidizer_install_layout,
+    run_pyoxidizer,
+)
 from .util import (
     extract_zip_to_directory,
     normalize_windows_version,
@@ -386,36 +390,66 @@
     """Build a WiX MSI installer using PyOxidizer."""
     hg_build_dir = source_dir / "build"
     build_dir = hg_build_dir / ("wix-%s" % target_triple)
-    staging_dir = build_dir / "stage"
-
-    arch = "x64" if "x86_64" in target_triple else "x86"
 
     build_dir.mkdir(parents=True, exist_ok=True)
-    run_pyoxidizer(source_dir, build_dir, staging_dir, target_triple)
+
+    # Need to ensure docs HTML is built because this isn't done as part of
+    # `pip install Mercurial`.
+    build_docs_html(source_dir)
+
+    build_vars = {}
 
-    # We also install some extra files.
-    process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)
+    if msi_name:
+        build_vars["MSI_NAME"] = msi_name
+
+    if version:
+        build_vars["VERSION"] = version
+
+    if extra_features:
+        build_vars["EXTRA_MSI_FEATURES"] = ";".join(extra_features)
 
-    # And remove some files we don't want.
-    for f in STAGING_REMOVE_FILES:
-        p = staging_dir / f
-        if p.exists():
-            print('removing %s' % p)
-            p.unlink()
+    if signing_info:
+        if signing_info["cert_path"]:
+            build_vars["SIGNING_PFX_PATH"] = signing_info["cert_path"]
+        if signing_info["cert_password"]:
+            build_vars["SIGNING_PFX_PASSWORD"] = signing_info["cert_password"]
+        if signing_info["subject_name"]:
+            build_vars["SIGNING_SUBJECT_NAME"] = signing_info["subject_name"]
+        if signing_info["timestamp_url"]:
+            build_vars["TIME_STAMP_SERVER_URL"] = signing_info["timestamp_url"]
 
-    return run_wix_packaging(
+    if extra_wxs:
+        raise Exception(
+            "support for extra .wxs files has been temporarily dropped"
+        )
+
+    out_dir = run_pyoxidizer(
         source_dir,
         build_dir,
-        staging_dir,
-        arch,
-        version,
-        python2=False,
-        msi_name=msi_name,
-        extra_wxs=extra_wxs,
-        extra_features=extra_features,
-        signing_info=signing_info,
+        target_triple,
+        build_vars=build_vars,
+        target="msi",
     )
 
+    msi_dir = out_dir / "msi"
+    msi_files = [f for f in os.listdir(msi_dir) if f.endswith(".msi")]
+
+    if len(msi_files) != 1:
+        raise Exception("expected exactly 1 .msi file; got %d" % len(msi_files))
+
+    msi_filename = msi_files[0]
+
+    msi_path = msi_dir / msi_filename
+    dist_path = source_dir / "dist" / msi_filename
+
+    dist_path.parent.mkdir(parents=True, exist_ok=True)
+
+    shutil.copyfile(msi_path, dist_path)
+
+    return {
+        "msi_path": dist_path,
+    }
+
 
 def run_wix_packaging(
     source_dir: pathlib.Path,
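
For reference, the ``signing_info`` mapping consumed above carries four
optional keys that are translated into the ``SIGNING_*`` and
``TIME_STAMP_SERVER_URL`` build variables. An illustrative value (every
string below is made up)::

  signing_info = {
      "cert_path": "C:/secrets/authenticode.pfx",
      "cert_password": "hunter2",
      "subject_name": None,  # alternative way to select a certificate
      "timestamp_url": "http://timestamp.example/rfc3161",
  }
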
--- a/contrib/packaging/wix/mercurial.wxs	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/packaging/wix/mercurial.wxs	Mon May 17 15:05:24 2021 +0200
@@ -135,9 +135,13 @@
     <UIRef Id="WixUI_FeatureTree" />
     <UIRef Id="WixUI_ErrorProgressText" />
 
+    <?ifdef PyOxidizer?>
+    <WixVariable Id="WixUILicenseRtf" Value="COPYING.rtf" />
+    <Icon Id="hgIcon.ico" SourceFile="mercurial.ico" />
+    <?else?>
     <WixVariable Id="WixUILicenseRtf" Value="contrib\packaging\wix\COPYING.rtf" />
-
     <Icon Id="hgIcon.ico" SourceFile="contrib/win32/mercurial.ico" />
+    <?endif?>
 
     <Upgrade Id='$(var.ProductUpgradeCode)'>
       <UpgradeVersion
--- a/contrib/perf.py	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/perf.py	Mon May 17 15:05:24 2021 +0200
@@ -66,6 +66,8 @@
 import tempfile
 import threading
 import time
+
+import mercurial.revlog
 from mercurial import (
     changegroup,
     cmdutil,
@@ -76,7 +78,6 @@
     hg,
     mdiff,
     merge,
-    revlog,
     util,
 )
 
@@ -119,6 +120,21 @@
 except ImportError:
     profiling = None
 
+try:
+    from mercurial.revlogutils import constants as revlog_constants
+
+    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
+
+    def revlog(opener, *args, **kwargs):
+        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
+
+
+except (ImportError, AttributeError):
+    perf_rl_kind = None
+
+    def revlog(opener, *args, **kwargs):
+        return mercurial.revlog.revlog(opener, *args, **kwargs)
+
 
 def identity(a):
     return a
@@ -1809,7 +1825,11 @@
 
     mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
     n = scmutil.revsingle(repo, rev).node()
-    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
+
+    try:
+        cl = revlog(getsvfs(repo), radix=b"00changelog")
+    except TypeError:
+        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
 
     def d():
         cl.rev(n)
@@ -2592,17 +2612,25 @@
     rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
 
     opener = getattr(rl, 'opener')  # trick linter
-    indexfile = rl.indexfile
+    # compat with hg <= 5.8
+    radix = getattr(rl, 'radix', None)
+    indexfile = getattr(rl, '_indexfile', None)
+    if indexfile is None:
+        # compatibility with hg <= 5.8
+        indexfile = getattr(rl, 'indexfile')
     data = opener.read(indexfile)
 
     header = struct.unpack(b'>I', data[0:4])[0]
     version = header & 0xFFFF
     if version == 1:
-        revlogio = revlog.revlogio()
         inline = header & (1 << 16)
     else:
         raise error.Abort(b'unsupported revlog version: %d' % version)
 
+    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
+    if parse_index_v1 is None:
+        parse_index_v1 = mercurial.revlog.revlogio().parseindex
+
     rllen = len(rl)
 
     node0 = rl.node(0)
@@ -2617,33 +2645,35 @@
     allnodesrev = list(reversed(allnodes))
 
     def constructor():
-        revlog.revlog(opener, indexfile)
+        if radix is not None:
+            revlog(opener, radix=radix)
+        else:
+            # hg <= 5.8
+            revlog(opener, indexfile=indexfile)
 
     def read():
         with opener(indexfile) as fh:
             fh.read()
 
     def parseindex():
-        revlogio.parseindex(data, inline)
+        parse_index_v1(data, inline)
 
     def getentry(revornode):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
         index[revornode]
 
     def getentries(revs, count=1):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
 
         for i in range(count):
             for rev in revs:
                 index[rev]
 
     def resolvenode(node):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
         rev = getattr(index, 'rev', None)
         if rev is None:
-            nodemap = getattr(
-                revlogio.parseindex(data, inline)[0], 'nodemap', None
-            )
+            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
             # This only works for the C code.
             if nodemap is None:
                 return
@@ -2655,12 +2685,10 @@
             pass
 
     def resolvenodes(nodes, count=1):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
         rev = getattr(index, 'rev', None)
         if rev is None:
-            nodemap = getattr(
-                revlogio.parseindex(data, inline)[0], 'nodemap', None
-            )
+            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
             # This only works for the C code.
             if nodemap is None:
                 return
@@ -3015,10 +3043,17 @@
     if util.safehasattr(orig, k):
         revlogkwargs[k] = getattr(orig, k)
 
-    origindexpath = orig.opener.join(orig.indexfile)
-    origdatapath = orig.opener.join(orig.datafile)
-    indexname = 'revlog.i'
-    dataname = 'revlog.d'
+    indexfile = getattr(orig, '_indexfile', None)
+    if indexfile is None:
+        # compatibility with hg <= 5.8
+        indexfile = getattr(orig, 'indexfile')
+    origindexpath = orig.opener.join(indexfile)
+
+    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
+    origdatapath = orig.opener.join(datafile)
+    radix = b'revlog'
+    indexname = b'revlog.i'
+    dataname = b'revlog.d'
 
     tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
     try:
@@ -3043,9 +3078,12 @@
         vfs = vfsmod.vfs(tmpdir)
         vfs.options = getattr(orig.opener, 'options', None)
 
-        dest = revlog.revlog(
-            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
-        )
+        try:
+            dest = revlog(vfs, radix=radix, **revlogkwargs)
+        except TypeError:
+            dest = revlog(
+                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
+            )
         if dest._inline:
             raise error.Abort('not supporting inline revlog (yet)')
         # make sure internals are initialized
@@ -3111,9 +3149,14 @@
 
     def rlfh(rl):
         if rl._inline:
-            return getsvfs(repo)(rl.indexfile)
+            indexfile = getattr(rl, '_indexfile', None)
+            if indexfile is None:
+                # compatibility with hg <= 5.8
+                indexfile = getattr(rl, 'indexfile')
+            return getsvfs(repo)(indexfile)
         else:
-            return getsvfs(repo)(rl.datafile)
+            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
+            return getsvfs(repo)(datafile)
 
     def doread():
         rl.clearcaches()
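
All of the perf.py changes above follow one compatibility pattern: probe for
the new private attribute first and fall back to the pre-5.9 public name.
Distilled into a standalone helper (a sketch; ``revlog_files`` is not a real
perf.py function)::

  def revlog_files(rl):
      # hg >= 5.9 exposes _indexfile/_datafile; older versions used the
      # public indexfile/datafile attributes
      indexfile = getattr(rl, '_indexfile', None)
      if indexfile is None:
          indexfile = rl.indexfile
      datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
      return indexfile, datafile
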
--- a/contrib/undumprevlog	Fri May 07 10:39:58 2021 +0200
+++ b/contrib/undumprevlog	Mon May 17 15:05:24 2021 +0200
@@ -15,6 +15,10 @@
 )
 from mercurial.utils import procutil
 
+from mercurial.revlogutils import (
+    constants as revlog_constants,
+)
+
 for fp in (sys.stdin, sys.stdout, sys.stderr):
     procutil.setbinary(fp)
 
@@ -28,7 +32,12 @@
         break
     if l.startswith("file:"):
         f = encoding.strtolocal(l[6:-1])
-        r = revlog.revlog(opener, f)
+        assert f.endswith(b'.i')
+        r = revlog.revlog(
+            opener,
+            target=(revlog_constants.KIND_OTHER, b'undump-revlog'),
+            radix=f[:-2],
+        )
         procutil.stdout.write(b'%s\n' % f)
     elif l.startswith("node:"):
         n = bin(l[6:-1])
--- a/hgext/absorb.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/absorb.py	Mon May 17 15:05:24 2021 +0200
@@ -38,7 +38,6 @@
 from mercurial.i18n import _
 from mercurial.node import (
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -109,7 +108,7 @@
         return b''
 
     def node(self):
-        return nullid
+        return self._repo.nullid
 
 
 def uniq(lst):
@@ -927,7 +926,7 @@
         the commit is a clone from ctx, with a (optionally) different p1, and
         different file contents replaced by memworkingcopy.
         """
-        parents = p1 and (p1, nullid)
+        parents = p1 and (p1, self.repo.nullid)
         extra = ctx.extra()
         if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
             extra[b'absorb_source'] = ctx.hex()
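
This hunk opens a long series with a single theme: the module-level
``nullid`` constant from ``mercurial.node`` is replaced by per-repository
node constants, so code stays correct for repositories whose hash function
is not SHA-1. The equivalence, as a sketch::

  from mercurial.node import sha1nodeconstants

  # for a SHA-1 repository, repo.nullid is the familiar twenty zero bytes
  assert sha1nodeconstants.nullid == b'\0' * 20

  def is_null(repo, node):
      # compare against the repo's own null node, not a global constant
      return node == repo.nullid
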
--- a/hgext/convert/git.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/convert/git.py	Mon May 17 15:05:24 2021 +0200
@@ -9,7 +9,7 @@
 import os
 
 from mercurial.i18n import _
-from mercurial.node import nullhex
+from mercurial.node import sha1nodeconstants
 from mercurial import (
     config,
     error,
@@ -192,7 +192,7 @@
         return heads
 
     def catfile(self, rev, ftype):
-        if rev == nullhex:
+        if rev == sha1nodeconstants.nullhex:
             raise IOError
         self.catfilepipe[0].write(rev + b'\n')
         self.catfilepipe[0].flush()
@@ -214,7 +214,7 @@
         return data
 
     def getfile(self, name, rev):
-        if rev == nullhex:
+        if rev == sha1nodeconstants.nullhex:
             return None, None
         if name == b'.hgsub':
             data = b'\n'.join([m.hgsub() for m in self.submoditer()])
@@ -228,7 +228,7 @@
         return data, mode
 
     def submoditer(self):
-        null = nullhex
+        null = sha1nodeconstants.nullhex
         for m in sorted(self.submodules, key=lambda p: p.path):
             if m.node != null:
                 yield m
@@ -317,7 +317,7 @@
                 subexists[0] = True
                 if entry[4] == b'D' or renamesource:
                     subdeleted[0] = True
-                    changes.append((b'.hgsub', nullhex))
+                    changes.append((b'.hgsub', sha1nodeconstants.nullhex))
                 else:
                     changes.append((b'.hgsub', b''))
             elif entry[1] == b'160000' or entry[0] == b':160000':
@@ -325,7 +325,7 @@
                     subexists[0] = True
             else:
                 if renamesource:
-                    h = nullhex
+                    h = sha1nodeconstants.nullhex
                 self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b""
                 changes.append((f, h))
 
@@ -362,7 +362,7 @@
 
         if subexists[0]:
             if subdeleted[0]:
-                changes.append((b'.hgsubstate', nullhex))
+                changes.append((b'.hgsubstate', sha1nodeconstants.nullhex))
             else:
                 self.retrievegitmodules(version)
                 changes.append((b'.hgsubstate', b''))
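
The converter traffics in 40-character hex revision ids, hence the hex-side
constant. How the binary and hex null nodes relate, as a sketch::

  from mercurial.node import bin, hex, sha1nodeconstants

  assert sha1nodeconstants.nullhex == b'0' * 40
  assert bin(sha1nodeconstants.nullhex) == sha1nodeconstants.nullid
  assert hex(sha1nodeconstants.nullid) == sha1nodeconstants.nullhex
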
--- a/hgext/convert/hg.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/convert/hg.py	Mon May 17 15:05:24 2021 +0200
@@ -27,8 +27,7 @@
 from mercurial.node import (
     bin,
     hex,
-    nullhex,
-    nullid,
+    sha1nodeconstants,
 )
 from mercurial import (
     bookmarks,
@@ -160,7 +159,7 @@
                 continue
             revid = revmap.get(source.lookuprev(s[0]))
             if not revid:
-                if s[0] == nullhex:
+                if s[0] == sha1nodeconstants.nullhex:
                     revid = s[0]
                 else:
                     # missing, but keep for hash stability
@@ -179,7 +178,7 @@
 
             revid = s[0]
             subpath = s[1]
-            if revid != nullhex:
+            if revid != sha1nodeconstants.nullhex:
                 revmap = self.subrevmaps.get(subpath)
                 if revmap is None:
                     revmap = mapfile(
@@ -304,9 +303,9 @@
             parent = parents[0]
 
         if len(parents) < 2:
-            parents.append(nullid)
+            parents.append(self.repo.nullid)
         if len(parents) < 2:
-            parents.append(nullid)
+            parents.append(self.repo.nullid)
         p2 = parents.pop(0)
 
         text = commit.desc
@@ -356,7 +355,7 @@
             p2 = parents.pop(0)
             p1ctx = self.repo[p1]
             p2ctx = None
-            if p2 != nullid:
+            if p2 != self.repo.nullid:
                 p2ctx = self.repo[p2]
             fileset = set(files)
             if full:
@@ -421,7 +420,7 @@
 
     def puttags(self, tags):
         tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
-        tagparent = tagparent or nullid
+        tagparent = tagparent or self.repo.nullid
 
         oldlines = set()
         for branch, heads in pycompat.iteritems(self.repo.branchmap()):
--- a/hgext/git/dirstate.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/git/dirstate.py	Mon May 17 15:05:24 2021 +0200
@@ -4,7 +4,7 @@
 import errno
 import os
 
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
 from mercurial import (
     error,
     extensions,
@@ -81,14 +81,16 @@
         except pygit2.GitError:
             # Typically happens when peeling HEAD fails, as in an
             # empty repository.
-            return nullid
+            return sha1nodeconstants.nullid
 
     def p2(self):
         # TODO: MERGE_HEAD? something like that, right?
-        return nullid
+        return sha1nodeconstants.nullid
 
-    def setparents(self, p1, p2=nullid):
-        assert p2 == nullid, b'TODO merging support'
+    def setparents(self, p1, p2=None):
+        if p2 is None:
+            p2 = sha1nodeconstants.nullid
+        assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
         self.git.head.set_target(gitutil.togitnode(p1))
 
     @util.propertycache
@@ -102,7 +104,7 @@
 
     def parents(self):
         # TODO how on earth do we find p2 if a merge is in flight?
-        return self.p1(), nullid
+        return self.p1(), sha1nodeconstants.nullid
 
     def __iter__(self):
         return (pycompat.fsencode(f.path) for f in self.git.index)
--- a/hgext/git/gitlog.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/git/gitlog.py	Mon May 17 15:05:24 2021 +0200
@@ -5,11 +5,8 @@
 from mercurial.node import (
     bin,
     hex,
-    nullhex,
-    nullid,
     nullrev,
     sha1nodeconstants,
-    wdirhex,
 )
 from mercurial import (
     ancestor,
@@ -47,7 +44,7 @@
         )
 
     def rev(self, n):
-        if n == nullid:
+        if n == sha1nodeconstants.nullid:
             return -1
         t = self._db.execute(
             'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
@@ -58,7 +55,7 @@
 
     def node(self, r):
         if r == nullrev:
-            return nullid
+            return sha1nodeconstants.nullid
         t = self._db.execute(
             'SELECT node FROM changelog WHERE rev = ?', (r,)
         ).fetchone()
@@ -135,7 +132,7 @@
             bin(v[0]): v[1]
             for v in self._db.execute('SELECT node, rev FROM changelog')
         }
-        r[nullid] = nullrev
+        r[sha1nodeconstants.nullid] = nullrev
         return r
 
     def tip(self):
@@ -144,7 +141,7 @@
         ).fetchone()
         if t:
             return bin(t[0])
-        return nullid
+        return sha1nodeconstants.nullid
 
     def revs(self, start=0, stop=None):
         if stop is None:
@@ -167,7 +164,7 @@
         return -1
 
     def _partialmatch(self, id):
-        if wdirhex.startswith(id):
+        if sha1nodeconstants.wdirhex.startswith(id):
             raise error.WdirUnsupported
         candidates = [
             bin(x[0])
@@ -176,8 +173,8 @@
                 (pycompat.sysstr(id + b'%'),),
             )
         ]
-        if nullhex.startswith(id):
-            candidates.append(nullid)
+        if sha1nodeconstants.nullhex.startswith(id):
+            candidates.append(sha1nodeconstants.nullid)
         if len(candidates) > 1:
             raise error.AmbiguousPrefixLookupError(
                 id, b'00changelog.i', _(b'ambiguous identifier')
@@ -223,8 +220,10 @@
             n = nodeorrev
         extra = {b'branch': b'default'}
         # handle looking up nullid
-        if n == nullid:
-            return hgchangelog._changelogrevision(extra=extra, manifest=nullid)
+        if n == sha1nodeconstants.nullid:
+            return hgchangelog._changelogrevision(
+                extra=extra, manifest=sha1nodeconstants.nullid
+            )
         hn = gitutil.togitnode(n)
         # We've got a real commit!
         files = [
@@ -301,7 +300,7 @@
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
         if common is None:
-            common = [nullid]
+            common = [sha1nodeconstants.nullid]
         if heads is None:
             heads = self.heads()
 
@@ -400,9 +399,9 @@
     ):
         parents = []
         hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
-        if p1 != nullid:
+        if p1 != sha1nodeconstants.nullid:
             parents.append(hp1)
-        if p2 and p2 != nullid:
+        if p2 and p2 != sha1nodeconstants.nullid:
             parents.append(hp2)
         assert date is not None
         timestamp, tz = date
@@ -435,7 +434,7 @@
         return self.get(b'', node)
 
     def get(self, relpath, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             # TODO: this should almost certainly be a memgittreemanifestctx
             return manifest.memtreemanifestctx(self, relpath)
         commit = self.gitrepo[gitutil.togitnode(node)]
@@ -454,9 +453,10 @@
         super(filelog, self).__init__(gr, db)
         assert isinstance(path, bytes)
         self.path = path
+        self.nullid = sha1nodeconstants.nullid
 
     def read(self, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             return b''
         return self.gitrepo[gitutil.togitnode(node)].data
 
--- a/hgext/git/gitutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/git/gitutil.py	Mon May 17 15:05:24 2021 +0200
@@ -1,7 +1,7 @@
 """utilities to assist in working with pygit2"""
 from __future__ import absolute_import
 
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex, sha1nodeconstants
 
 from mercurial import pycompat
 
@@ -50,4 +50,4 @@
     return bin(n)
 
 
-nullgit = togitnode(nullid)
+nullgit = togitnode(sha1nodeconstants.nullid)
--- a/hgext/git/index.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/git/index.py	Mon May 17 15:05:24 2021 +0200
@@ -5,9 +5,7 @@
 import sqlite3
 
 from mercurial.i18n import _
-from mercurial.node import (
-    nullid,
-)
+from mercurial.node import sha1nodeconstants
 
 from mercurial import (
     encoding,
@@ -317,7 +315,9 @@
                 )
             new_files = (p.delta.new_file for p in patchgen)
             files = {
-                nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
+                nf.path: nf.id.hex
+                for nf in new_files
+                if nf.id.raw != sha1nodeconstants.nullid
             }
             for p, n in files.items():
                 # We intentionally set NULLs for any file parentage
--- a/hgext/gpg.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/gpg.py	Mon May 17 15:05:24 2021 +0200
@@ -14,7 +14,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -314,7 +313,9 @@
     if revs:
         nodes = [repo.lookup(n) for n in revs]
     else:
-        nodes = [node for node in repo.dirstate.parents() if node != nullid]
+        nodes = [
+            node for node in repo.dirstate.parents() if node != repo.nullid
+        ]
         if len(nodes) > 1:
             raise error.Abort(
                 _(b'uncommitted merge - please provide a specific revision')
--- a/hgext/hgk.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/hgk.py	Mon May 17 15:05:24 2021 +0200
@@ -40,7 +40,6 @@
 
 from mercurial.i18n import _
 from mercurial.node import (
-    nullid,
     nullrev,
     short,
 )
@@ -95,7 +94,7 @@
         mmap2 = repo[node2].manifest()
         m = scmutil.match(repo[node1], files)
         st = repo.status(node1, node2, m)
-        empty = short(nullid)
+        empty = short(repo.nullid)
 
         for f in st.modified:
             # TODO get file permissions
@@ -317,9 +316,9 @@
             parentstr = b""
             if parents:
                 pp = repo.changelog.parents(n)
-                if pp[0] != nullid:
+                if pp[0] != repo.nullid:
                     parentstr += b" " + short(pp[0])
-                if pp[1] != nullid:
+                if pp[1] != repo.nullid:
                     parentstr += b" " + short(pp[1])
             if not full:
                 ui.write(b"%s%s\n" % (short(n), parentstr))
--- a/hgext/journal.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/journal.py	Mon May 17 15:05:24 2021 +0200
@@ -22,7 +22,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
 )
 
 from mercurial import (
@@ -117,8 +116,8 @@
     new = list(new)
     if util.safehasattr(dirstate, 'journalstorage'):
         # only record two hashes if there was a merge
-        oldhashes = old[:1] if old[1] == nullid else old
-        newhashes = new[:1] if new[1] == nullid else new
+        oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
+        newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
         dirstate.journalstorage.record(
             wdirparenttype, b'.', oldhashes, newhashes
         )
@@ -131,7 +130,7 @@
     if util.safehasattr(repo, 'journal'):
         oldmarks = bookmarks.bmstore(repo)
         for mark, value in pycompat.iteritems(store):
-            oldvalue = oldmarks.get(mark, nullid)
+            oldvalue = oldmarks.get(mark, repo.nullid)
             if value != oldvalue:
                 repo.journal.record(bookmarktype, mark, oldvalue, value)
     return orig(store, fp)
--- a/hgext/largefiles/basestore.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/largefiles/basestore.py	Mon May 17 15:05:24 2021 +0200
@@ -11,7 +11,8 @@
 
 from mercurial.i18n import _
 
-from mercurial import node, util
+from mercurial.node import short
+from mercurial import util
 from mercurial.utils import (
     urlutil,
 )
@@ -137,7 +138,7 @@
         filestocheck = []  # list of (cset, filename, expectedhash)
         for rev in revs:
             cctx = self.repo[rev]
-            cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node()))
+            cset = b"%d:%s" % (cctx.rev(), short(cctx.node()))
 
             for standin in cctx:
                 filename = lfutil.splitstandin(standin)
--- a/hgext/largefiles/lfcommands.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/largefiles/lfcommands.py	Mon May 17 15:05:24 2021 +0200
@@ -17,7 +17,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
 )
 
 from mercurial import (
@@ -115,7 +114,7 @@
             rsrc[ctx]
             for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
         )
-        revmap = {nullid: nullid}
+        revmap = {rsrc.nullid: rdst.nullid}
         if tolfile:
             # Lock destination to prevent modification while it is converted to.
             # Don't need to lock src because we are just reading from its
@@ -340,7 +339,7 @@
 # Generate list of changed files
 def _getchangedfiles(ctx, parents):
     files = set(ctx.files())
-    if nullid not in parents:
+    if ctx.repo().nullid not in parents:
         mc = ctx.manifest()
         for pctx in ctx.parents():
             for fn in pctx.manifest().diff(mc):
@@ -354,7 +353,7 @@
     for p in ctx.parents():
         parents.append(revmap[p.node()])
     while len(parents) < 2:
-        parents.append(nullid)
+        parents.append(ctx.repo().nullid)
     return parents
 
 
--- a/hgext/largefiles/lfutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/largefiles/lfutil.py	Mon May 17 15:05:24 2021 +0200
@@ -15,10 +15,7 @@
 import stat
 
 from mercurial.i18n import _
-from mercurial.node import (
-    hex,
-    nullid,
-)
+from mercurial.node import hex
 from mercurial.pycompat import open
 
 from mercurial import (
@@ -613,7 +610,7 @@
     ) as progress:
         for i, n in enumerate(missing):
             progress.update(i)
-            parents = [p for p in repo[n].parents() if p != nullid]
+            parents = [p for p in repo[n].parents() if p != repo.nullid]
 
             with lfstatus(repo, value=False):
                 ctx = repo[n]
--- a/hgext/lfs/wrapper.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/lfs/wrapper.py	Mon May 17 15:05:24 2021 +0200
@@ -10,7 +10,7 @@
 import hashlib
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid, short
+from mercurial.node import bin, hex, short
 from mercurial.pycompat import (
     getattr,
     setattr,
@@ -158,7 +158,7 @@
         rev = rlog.rev(node)
     else:
         node = rlog.node(rev)
-    if node == nullid:
+    if node == rlog.nullid:
         return False
     flags = rlog.flags(rev)
     return bool(flags & revlog.REVIDX_EXTSTORED)
--- a/hgext/mq.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/mq.py	Mon May 17 15:05:24 2021 +0200
@@ -73,7 +73,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -908,13 +907,13 @@
         """
         if rev is None:
             (p1, p2) = repo.dirstate.parents()
-            if p2 == nullid:
+            if p2 == repo.nullid:
                 return p1
             if not self.applied:
                 return None
             return self.applied[-1].node
         p1, p2 = repo.changelog.parents(rev)
-        if p2 != nullid and p2 in [x.node for x in self.applied]:
+        if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
             return p2
         return p1
 
@@ -1591,7 +1590,7 @@
             for hs in repo.branchmap().iterheads():
                 heads.extend(hs)
             if not heads:
-                heads = [nullid]
+                heads = [repo.nullid]
             if repo.dirstate.p1() not in heads and not exact:
                 self.ui.status(_(b"(working directory not at a head)\n"))
 
@@ -1857,7 +1856,7 @@
                         fctx = ctx[f]
                         repo.wwrite(f, fctx.data(), fctx.flags())
                         repo.dirstate.normal(f)
-                    repo.setparents(qp, nullid)
+                    repo.setparents(qp, repo.nullid)
             for patch in reversed(self.applied[start:end]):
                 self.ui.status(_(b"popping %s\n") % patch.name)
             del self.applied[start:end]
--- a/hgext/narrow/narrowbundle2.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/narrow/narrowbundle2.py	Mon May 17 15:05:24 2021 +0200
@@ -11,7 +11,6 @@
 import struct
 
 from mercurial.i18n import _
-from mercurial.node import nullid
 from mercurial import (
     bundle2,
     changegroup,
@@ -94,7 +93,7 @@
             raise error.Abort(_(b'depth must be positive, got %d') % depth)
 
     heads = set(heads or repo.heads())
-    common = set(common or [nullid])
+    common = set(common or [repo.nullid])
 
     visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), match, depth=depth
@@ -128,7 +127,7 @@
     common,
     known,
 ):
-    common = set(common or [nullid])
+    common = set(common or [repo.nullid])
     # Steps:
     # 1. Send kill for "$known & ::common"
     #
--- a/hgext/narrow/narrowcommands.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/narrow/narrowcommands.py	Mon May 17 15:05:24 2021 +0200
@@ -12,7 +12,6 @@
 from mercurial.i18n import _
 from mercurial.node import (
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -193,7 +192,7 @@
         kwargs[b'known'] = [
             hex(ctx.node())
             for ctx in repo.set(b'::%ln', pullop.common)
-            if ctx.node() != nullid
+            if ctx.node() != repo.nullid
         ]
         if not kwargs[b'known']:
             # Mercurial serializes an empty list as '' and deserializes it as
@@ -228,10 +227,17 @@
     unfi = repo.unfiltered()
     outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
     ui.status(_(b'looking for local changes to affected paths\n'))
+    progress = ui.makeprogress(
+        topic=_(b'changesets'),
+        unit=_(b'changesets'),
+        total=len(outgoing.missing) + len(outgoing.excluded),
+    )
     localnodes = []
-    for n in itertools.chain(outgoing.missing, outgoing.excluded):
-        if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
-            localnodes.append(n)
+    with progress:
+        for n in itertools.chain(outgoing.missing, outgoing.excluded):
+            progress.increment()
+            if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
+                localnodes.append(n)
     revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
     hiddenrevs = repoview.filterrevs(repo, b'visible')
     visibletostrip = list(
@@ -275,6 +281,10 @@
                 )
                 hg.clean(repo, urev)
             overrides = {(b'devel', b'strip-obsmarkers'): False}
+            if backup:
+                ui.status(_(b'moving unwanted changesets to backup\n'))
+            else:
+                ui.status(_(b'deleting unwanted changesets\n'))
             with ui.configoverride(overrides, b'narrow'):
                 repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
 
@@ -310,6 +320,7 @@
                 util.unlinkpath(repo.svfs.join(f))
                 repo.store.markremoved(f)
 
+            ui.status(_(b'deleting unwanted files from working copy\n'))
             narrowspec.updateworkingcopy(repo, assumeclean=True)
             narrowspec.copytoworkingcopy(repo)
 
@@ -370,7 +381,7 @@
             ds = repo.dirstate
             p1, p2 = ds.p1(), ds.p2()
             with ds.parentchange():
-                ds.setparents(nullid, nullid)
+                ds.setparents(repo.nullid, repo.nullid)
         if isoldellipses:
             with wrappedextraprepare:
                 exchange.pull(repo, remote, heads=common)
@@ -380,7 +391,7 @@
                 known = [
                     ctx.node()
                     for ctx in repo.set(b'::%ln', common)
-                    if ctx.node() != nullid
+                    if ctx.node() != repo.nullid
                 ]
             with remote.commandexecutor() as e:
                 bundle = e.callcommand(
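
Besides the nullid migration, this hunk adds progress reporting and status
messages to the narrowing path. The ``ui.makeprogress`` idiom it adopts, as
a generic sketch (``process_all`` and its arguments are hypothetical)::

  def process_all(ui, items, process):
      progress = ui.makeprogress(
          topic=b'items', unit=b'items', total=len(items)
      )
      with progress:
          for item in items:
              progress.increment()
              process(item)  # per-item work goes here
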
--- a/hgext/phabricator.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/phabricator.py	Mon May 17 15:05:24 2021 +0200
@@ -69,7 +69,7 @@
 import re
 import time
 
-from mercurial.node import bin, nullid, short
+from mercurial.node import bin, short
 from mercurial.i18n import _
 from mercurial.pycompat import getattr
 from mercurial.thirdparty import attr
@@ -586,7 +586,7 @@
                 tags.tag(
                     repo,
                     tagname,
-                    nullid,
+                    repo.nullid,
                     message=None,
                     user=None,
                     date=None,
@@ -1606,7 +1606,7 @@
                         tags.tag(
                             repo,
                             tagname,
-                            nullid,
+                            repo.nullid,
                             message=None,
                             user=None,
                             date=None,
--- a/hgext/rebase.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/rebase.py	Mon May 17 15:05:24 2021 +0200
@@ -446,8 +446,15 @@
             rebaseset = set(destmap.keys())
             rebaseset -= set(self.obsolete_with_successor_in_destination)
             rebaseset -= self.obsolete_with_successor_in_rebase_set
+            # We have our own divergence-checking in the rebase extension
+            overrides = {}
+            if obsolete.isenabled(self.repo, obsolete.createmarkersopt):
+                overrides = {
+                    (b'experimental', b'evolution.allowdivergence'): b'true'
+                }
             try:
-                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
+                with self.ui.configoverride(overrides):
+                    rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
             except error.Abort as e:
                 if e.hint is None:
                     e.hint = _(b'use --keep to keep original changesets')
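
The override is scoped with ``ui.configoverride``, which layers config values
for the duration of a ``with`` block and restores them afterwards. The
general pattern, as a sketch (``allow_divergence`` is a hypothetical
helper)::

  def allow_divergence(repo):
      # force the knob on while the returned context is active
      overrides = {
          (b'experimental', b'evolution.allowdivergence'): b'true',
      }
      return repo.ui.configoverride(overrides)

  # usage:
  #   with allow_divergence(repo):
  #       rewriteutil.precheck(repo, revs, action=b'rebase')
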
--- a/hgext/remotefilelog/contentstore.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/contentstore.py	Mon May 17 15:05:24 2021 +0200
@@ -2,7 +2,10 @@
 
 import threading
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from mercurial.pycompat import getattr
 from mercurial import (
     mdiff,
@@ -55,7 +58,7 @@
         """
         chain = self.getdeltachain(name, node)
 
-        if chain[-1][ChainIndicies.BASENODE] != nullid:
+        if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
             # If we didn't receive a full chain, throw
             raise KeyError((name, hex(node)))
 
@@ -92,7 +95,7 @@
         deltabasenode.
         """
         chain = self._getpartialchain(name, node)
-        while chain[-1][ChainIndicies.BASENODE] != nullid:
+        while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
             x, x, deltabasename, deltabasenode, x = chain[-1]
             try:
                 morechain = self._getpartialchain(deltabasename, deltabasenode)
@@ -187,7 +190,12 @@
         # Since remotefilelog content stores only contain full texts, just
         # return that.
         revision = self.get(name, node)
-        return revision, name, nullid, self.getmeta(name, node)
+        return (
+            revision,
+            name,
+            sha1nodeconstants.nullid,
+            self.getmeta(name, node),
+        )
 
     def getdeltachain(self, name, node):
         # Since remotefilelog content stores just contain full texts, we return
@@ -195,7 +203,7 @@
         # The nullid in the deltabasenode slot indicates that the revision is a
         # fulltext.
         revision = self.get(name, node)
-        return [(name, node, None, nullid, revision)]
+        return [(name, node, None, sha1nodeconstants.nullid, revision)]
 
     def getmeta(self, name, node):
         self._sanitizemetacache()
@@ -237,7 +245,12 @@
 
     def getdelta(self, name, node):
         revision = self.get(name, node)
-        return revision, name, nullid, self._shared.getmeta(name, node)
+        return (
+            revision,
+            name,
+            sha1nodeconstants.nullid,
+            self._shared.getmeta(name, node),
+        )
 
     def getdeltachain(self, name, node):
         # Since our remote content stores just contain full texts, we return a
@@ -245,7 +258,7 @@
         # The nullid in the deltabasenode slot indicates that the revision is a
         # fulltext.
         revision = self.get(name, node)
-        return [(name, node, None, nullid, revision)]
+        return [(name, node, None, sha1nodeconstants.nullid, revision)]
 
     def getmeta(self, name, node):
         self._fileservice.prefetch(
@@ -268,7 +281,7 @@
         self._store = repo.store
         self._svfs = repo.svfs
         self._revlogs = dict()
-        self._cl = revlog.revlog(self._svfs, b'00changelog.i')
+        self._cl = revlog.revlog(self._svfs, radix=b'00changelog')
         self._repackstartlinkrev = 0
 
     def get(self, name, node):
@@ -276,11 +289,11 @@
 
     def getdelta(self, name, node):
         revision = self.get(name, node)
-        return revision, name, nullid, self.getmeta(name, node)
+        return revision, name, self._cl.nullid, self.getmeta(name, node)
 
     def getdeltachain(self, name, node):
         revision = self.get(name, node)
-        return [(name, node, None, nullid, revision)]
+        return [(name, node, None, self._cl.nullid, revision)]
 
     def getmeta(self, name, node):
         rl = self._revlog(name)
@@ -304,9 +317,9 @@
             missing.discard(ancnode)
 
             p1, p2 = rl.parents(ancnode)
-            if p1 != nullid and p1 not in known:
+            if p1 != self._cl.nullid and p1 not in known:
                 missing.add(p1)
-            if p2 != nullid and p2 not in known:
+            if p2 != self._cl.nullid and p2 not in known:
                 missing.add(p2)
 
             linknode = self._cl.node(rl.linkrev(ancrev))
@@ -328,10 +341,10 @@
     def _revlog(self, name):
         rl = self._revlogs.get(name)
         if rl is None:
-            revlogname = b'00manifesttree.i'
+            revlogname = b'00manifesttree'
             if name != b'':
-                revlogname = b'meta/%s/00manifest.i' % name
-            rl = revlog.revlog(self._svfs, revlogname)
+                revlogname = b'meta/%s/00manifest' % name
+            rl = revlog.revlog(self._svfs, radix=revlogname)
             self._revlogs[name] = rl
         return rl
 
@@ -352,7 +365,7 @@
         if options and options.get(constants.OPTION_PACKSONLY):
             return
         treename = b''
-        rl = revlog.revlog(self._svfs, b'00manifesttree.i')
+        rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
         startlinkrev = self._repackstartlinkrev
         endlinkrev = self._repackendlinkrev
         for rev in pycompat.xrange(len(rl) - 1, -1, -1):
@@ -369,9 +382,9 @@
             if path[:5] != b'meta/' or path[-2:] != b'.i':
                 continue
 
             treename = path[5 : -len(b'/00manifest.i')]
 
-            rl = revlog.revlog(self._svfs, path)
+            rl = revlog.revlog(self._svfs, radix=path[:-2])
             for rev in pycompat.xrange(len(rl) - 1, -1, -1):
                 linkrev = rl.linkrev(rev)
                 if linkrev < startlinkrev:
--- a/hgext/remotefilelog/datapack.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/datapack.py	Mon May 17 15:05:24 2021 +0200
@@ -3,7 +3,10 @@
 import struct
 import zlib
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from mercurial.i18n import _
 from mercurial import (
     pycompat,
@@ -458,7 +461,7 @@
         rawindex = b''
         fmt = self.INDEXFORMAT
         for node, deltabase, offset, size in entries:
-            if deltabase == nullid:
+            if deltabase == sha1nodeconstants.nullid:
                 deltabaselocation = FULLTEXTINDEXMARK
             else:
                 # Instead of storing the deltabase node in the index, let's
--- a/hgext/remotefilelog/debugcommands.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/debugcommands.py	Mon May 17 15:05:24 2021 +0200
@@ -12,7 +12,7 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
+    sha1nodeconstants,
     short,
 )
 from mercurial.i18n import _
@@ -57,9 +57,9 @@
             _(b"%s => %s  %s  %s  %s\n")
             % (short(node), short(p1), short(p2), short(linknode), copyfrom)
         )
-        if p1 != nullid:
+        if p1 != sha1nodeconstants.nullid:
             queue.append(p1)
-        if p2 != nullid:
+        if p2 != sha1nodeconstants.nullid:
             queue.append(p2)
 
 
@@ -152,7 +152,7 @@
             try:
                 pp = r.parents(node)
             except Exception:
-                pp = [nullid, nullid]
+                pp = [repo.nullid, repo.nullid]
             ui.write(
                 b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
                 % (
@@ -197,7 +197,7 @@
         node = r.node(i)
         pp = r.parents(node)
         ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
-        if pp[1] != nullid:
+        if pp[1] != repo.nullid:
             ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write(b"}\n")
 
@@ -212,7 +212,7 @@
             filepath = os.path.join(root, file)
             size, firstnode, mapping = parsefileblob(filepath, decompress)
             for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping):
-                if linknode == nullid:
+                if linknode == sha1nodeconstants.nullid:
                     actualpath = os.path.relpath(root, path)
                     key = fileserverclient.getcachekey(
                         b"reponame", actualpath, file
@@ -371,7 +371,7 @@
         current = node
         deltabase = bases[current]
 
-        while deltabase != nullid:
+        while deltabase != sha1nodeconstants.nullid:
             if deltabase not in nodes:
                 ui.warn(
                     (
@@ -397,7 +397,7 @@
             deltabase = bases[current]
         # Since ``node`` begins a valid chain, reset/memoize its base to nullid
         # so we don't traverse it again.
-        bases[node] = nullid
+        bases[node] = sha1nodeconstants.nullid
     return failures
 
 
--- a/hgext/remotefilelog/fileserverclient.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/fileserverclient.py	Mon May 17 15:05:24 2021 +0200
@@ -14,7 +14,7 @@
 import zlib
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
 from mercurial import (
     error,
     pycompat,
@@ -272,7 +272,7 @@
 def _getfiles_threaded(
     remote, receivemissing, progresstick, missed, idmap, step
 ):
-    remote._callstream(b"getfiles")
+    remote._callstream(b"x_rfl_getfiles")
     pipeo = remote._pipeo
     pipei = remote._pipei
 
@@ -599,9 +599,13 @@
 
         # partition missing nodes into nullid and not-nullid so we can
         # warn about this filtering potentially shadowing bugs.
-        nullids = len([None for unused, id in missingids if id == nullid])
+        nullids = len(
+            [None for unused, id in missingids if id == self.repo.nullid]
+        )
         if nullids:
-            missingids = [(f, id) for f, id in missingids if id != nullid]
+            missingids = [
+                (f, id) for f, id in missingids if id != self.repo.nullid
+            ]
             repo.ui.develwarn(
                 (
                     b'remotefilelog not fetching %d null revs'
--- a/hgext/remotefilelog/historypack.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/historypack.py	Mon May 17 15:05:24 2021 +0200
@@ -2,7 +2,10 @@
 
 import struct
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from mercurial import (
     pycompat,
     util,
@@ -147,9 +150,9 @@
                 pending.remove(ancnode)
                 p1node = entry[ANC_P1NODE]
                 p2node = entry[ANC_P2NODE]
-                if p1node != nullid and p1node not in known:
+                if p1node != sha1nodeconstants.nullid and p1node not in known:
                     pending.add(p1node)
-                if p2node != nullid and p2node not in known:
+                if p2node != sha1nodeconstants.nullid and p2node not in known:
                     pending.add(p2node)
 
                 yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)
@@ -457,9 +460,9 @@
             def parentfunc(node):
                 x, p1, p2, x, x, x = entrymap[node]
                 parents = []
-                if p1 != nullid:
+                if p1 != sha1nodeconstants.nullid:
                     parents.append(p1)
-                if p2 != nullid:
+                if p2 != sha1nodeconstants.nullid:
                     parents.append(p2)
                 return parents
 
--- a/hgext/remotefilelog/metadatastore.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/metadatastore.py	Mon May 17 15:05:24 2021 +0200
@@ -1,6 +1,9 @@
 from __future__ import absolute_import
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from . import (
     basestore,
     shallowutil,
@@ -51,9 +54,9 @@
                     missing.append((name, node))
                     continue
                 p1, p2, linknode, copyfrom = value
-                if p1 != nullid and p1 not in known:
+                if p1 != sha1nodeconstants.nullid and p1 not in known:
                     queue.append((copyfrom or curname, p1))
-                if p2 != nullid and p2 not in known:
+                if p2 != sha1nodeconstants.nullid and p2 not in known:
                     queue.append((curname, p2))
             return missing
 
--- a/hgext/remotefilelog/remotefilectx.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/remotefilectx.py	Mon May 17 15:05:24 2021 +0200
@@ -9,7 +9,7 @@
 import collections
 import time
 
-from mercurial.node import bin, hex, nullid, nullrev
+from mercurial.node import bin, hex, nullrev
 from mercurial import (
     ancestor,
     context,
@@ -35,7 +35,7 @@
         ancestormap=None,
     ):
         if fileid == nullrev:
-            fileid = nullid
+            fileid = repo.nullid
         if fileid and len(fileid) == 40:
             fileid = bin(fileid)
         super(remotefilectx, self).__init__(
@@ -78,7 +78,7 @@
 
     @propertycache
     def _linkrev(self):
-        if self._filenode == nullid:
+        if self._filenode == self._repo.nullid:
             return nullrev
 
         ancestormap = self.ancestormap()
@@ -174,7 +174,7 @@
 
         p1, p2, linknode, copyfrom = ancestormap[self._filenode]
         results = []
-        if p1 != nullid:
+        if p1 != repo.nullid:
             path = copyfrom or self._path
             flog = repo.file(path)
             p1ctx = remotefilectx(
@@ -183,7 +183,7 @@
             p1ctx._descendantrev = self.rev()
             results.append(p1ctx)
 
-        if p2 != nullid:
+        if p2 != repo.nullid:
             path = self._path
             flog = repo.file(path)
             p2ctx = remotefilectx(
@@ -504,25 +504,25 @@
             if renamed:
                 p1 = renamed
             else:
-                p1 = (path, pcl[0]._manifest.get(path, nullid))
+                p1 = (path, pcl[0]._manifest.get(path, self._repo.nullid))
 
-            p2 = (path, nullid)
+            p2 = (path, self._repo.nullid)
             if len(pcl) > 1:
-                p2 = (path, pcl[1]._manifest.get(path, nullid))
+                p2 = (path, pcl[1]._manifest.get(path, self._repo.nullid))
 
             m = {}
-            if p1[1] != nullid:
+            if p1[1] != self._repo.nullid:
                 p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
                 m.update(p1ctx.filelog().ancestormap(p1[1]))
 
-            if p2[1] != nullid:
+            if p2[1] != self._repo.nullid:
                 p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
                 m.update(p2ctx.filelog().ancestormap(p2[1]))
 
             copyfrom = b''
             if renamed:
                 copyfrom = renamed[0]
-            m[None] = (p1[1], p2[1], nullid, copyfrom)
+            m[None] = (p1[1], p2[1], self._repo.nullid, copyfrom)
             self._ancestormap = m
 
         return self._ancestormap
--- a/hgext/remotefilelog/remotefilelog.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/remotefilelog.py	Mon May 17 15:05:24 2021 +0200
@@ -10,12 +10,7 @@
 import collections
 import os
 
-from mercurial.node import (
-    bin,
-    nullid,
-    wdirfilenodeids,
-    wdirid,
-)
+from mercurial.node import bin
 from mercurial.i18n import _
 from mercurial import (
     ancestor,
@@ -100,7 +95,7 @@
 
         pancestors = {}
         queue = []
-        if realp1 != nullid:
+        if realp1 != self.repo.nullid:
             p1flog = self
             if copyfrom:
                 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
@@ -108,7 +103,7 @@
             pancestors.update(p1flog.ancestormap(realp1))
             queue.append(realp1)
             visited.add(realp1)
-        if p2 != nullid:
+        if p2 != self.repo.nullid:
             pancestors.update(self.ancestormap(p2))
             queue.append(p2)
             visited.add(p2)
@@ -129,10 +124,10 @@
                 pacopyfrom,
             )
 
-            if pa1 != nullid and pa1 not in visited:
+            if pa1 != self.repo.nullid and pa1 not in visited:
                 queue.append(pa1)
                 visited.add(pa1)
-            if pa2 != nullid and pa2 not in visited:
+            if pa2 != self.repo.nullid and pa2 not in visited:
                 queue.append(pa2)
                 visited.add(pa2)
 
@@ -238,7 +233,7 @@
         returns True if text is different from what is stored.
         """
 
-        if node == nullid:
+        if node == self.repo.nullid:
             return True
 
         nodetext = self.read(node)
@@ -275,13 +270,13 @@
         return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
 
     def parents(self, node):
-        if node == nullid:
-            return nullid, nullid
+        if node == self.repo.nullid:
+            return self.repo.nullid, self.repo.nullid
 
         ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestormap[node]
         if copyfrom:
-            p1 = nullid
+            p1 = self.repo.nullid
 
         return p1, p2
 
@@ -317,8 +312,8 @@
             if prevnode is None:
                 basenode = prevnode = p1
             if basenode == node:
-                basenode = nullid
-            if basenode != nullid:
+                basenode = self.repo.nullid
+            if basenode != self.repo.nullid:
                 revision = None
                 delta = self.revdiff(basenode, node)
             else:
@@ -336,6 +331,8 @@
                 delta=delta,
                 # Sidedata is not supported yet
                 sidedata=None,
+                # Protocol flags are not used yet
+                protocol_flags=0,
             )
 
     def revdiff(self, node1, node2):
@@ -380,13 +377,16 @@
         this is generally only used for bundling and communicating with vanilla
         hg clients.
         """
-        if node == nullid:
+        if node == self.repo.nullid:
             return b""
         if len(node) != 20:
             raise error.LookupError(
                 node, self.filename, _(b'invalid revision input')
             )
-        if node == wdirid or node in wdirfilenodeids:
+        if (
+            node == self.repo.nodeconstants.wdirid
+            or node in self.repo.nodeconstants.wdirfilenodeids
+        ):
             raise error.WdirUnsupported
 
         store = self.repo.contentstore
@@ -432,8 +432,8 @@
         return self.repo.metadatastore.getancestors(self.filename, node)
 
     def ancestor(self, a, b):
-        if a == nullid or b == nullid:
-            return nullid
+        if a == self.repo.nullid or b == self.repo.nullid:
+            return self.repo.nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -442,13 +442,13 @@
         if ancs:
             # choose a consistent winner when there's a tie
             return min(map(nodemap.__getitem__, ancs))
-        return nullid
+        return self.repo.nullid
 
     def commonancestorsheads(self, a, b):
         """calculate all the heads of the common ancestors of nodes a and b"""
 
-        if a == nullid or b == nullid:
-            return nullid
+        if a == self.repo.nullid or b == self.repo.nullid:
+            return self.repo.nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -472,10 +472,10 @@
                 p1, p2, linknode, copyfrom = pdata
                 # Don't follow renames (copyfrom).
                 # remotefilectx.ancestor does that.
-                if p1 != nullid and not copyfrom:
+                if p1 != self.repo.nullid and not copyfrom:
                     parents.append(p1)
                     allparents.add(p1)
-                if p2 != nullid:
+                if p2 != self.repo.nullid:
                     parents.append(p2)
                     allparents.add(p2)
 
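Editor's note: one invariant worth keeping in mind for the parents() change
above: the null node is its own parent pair, and a copy source masks p1. A
compact sketch of that contract, with `getancestors` standing in for the
metadata-store call used above:

    def parents(repo, filename, node, getancestors):
        if node == repo.nullid:
            return repo.nullid, repo.nullid
        p1, p2, linknode, copyfrom = getancestors(filename, node)[node]
        if copyfrom:
            # renames break the p1 chain; the copy source is tracked
            # separately, so p1 is reported as null
            p1 = repo.nullid
        return p1, p2
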
--- a/hgext/remotefilelog/remotefilelogserver.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/remotefilelogserver.py	Mon May 17 15:05:24 2021 +0200
@@ -13,7 +13,7 @@
 import zlib
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
 from mercurial.pycompat import open
 from mercurial import (
     changegroup,
@@ -242,7 +242,7 @@
     filecachepath = os.path.join(cachepath, path, hex(node))
     if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
         filectx = repo.filectx(path, fileid=node)
-        if filectx.node() == nullid:
+        if filectx.node() == repo.nullid:
             repo.changelog = changelog.changelog(repo.svfs)
             filectx = repo.filectx(path, fileid=node)
 
@@ -284,7 +284,7 @@
     """A server api for requesting a filelog's heads"""
     flog = repo.file(path)
     heads = flog.heads()
-    return b'\n'.join((hex(head) for head in heads if head != nullid))
+    return b'\n'.join((hex(head) for head in heads if head != repo.nullid))
 
 
 def getfile(repo, proto, file, node):
@@ -302,7 +302,7 @@
     if not cachepath:
         cachepath = os.path.join(repo.path, b"remotefilelogcache")
     node = bin(node.strip())
-    if node == nullid:
+    if node == repo.nullid:
         return b'0\0'
     return b'0\0' + _loadfileblob(repo, cachepath, file, node)
 
@@ -327,7 +327,7 @@
                 break
 
             node = bin(request[:40])
-            if node == nullid:
+            if node == repo.nullid:
                 yield b'0\n'
                 continue
 
@@ -380,8 +380,8 @@
         ancestortext = b""
         for ancestorctx in ancestors:
             parents = ancestorctx.parents()
-            p1 = nullid
-            p2 = nullid
+            p1 = repo.nullid
+            p2 = repo.nullid
             if len(parents) > 0:
                 p1 = parents[0].filenode()
             if len(parents) > 1:
--- a/hgext/remotefilelog/repack.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/repack.py	Mon May 17 15:05:24 2021 +0200
@@ -4,10 +4,7 @@
 import time
 
 from mercurial.i18n import _
-from mercurial.node import (
-    nullid,
-    short,
-)
+from mercurial.node import short
 from mercurial import (
     encoding,
     error,
@@ -586,7 +583,7 @@
         # Create one contiguous chain and reassign deltabases.
         for i, node in enumerate(orphans):
             if i == 0:
-                deltabases[node] = (nullid, 0)
+                deltabases[node] = (self.repo.nullid, 0)
             else:
                 parent = orphans[i - 1]
                 deltabases[node] = (parent, deltabases[parent][1] + 1)
@@ -676,8 +673,8 @@
                 # of immediate child
                 deltatuple = deltabases.get(node, None)
                 if deltatuple is None:
-                    deltabase, chainlen = nullid, 0
-                    deltabases[node] = (nullid, 0)
+                    deltabase, chainlen = self.repo.nullid, 0
+                    deltabases[node] = (self.repo.nullid, 0)
                     nobase.add(node)
                 else:
                     deltabase, chainlen = deltatuple
@@ -692,7 +689,7 @@
                     # file was copied from elsewhere. So don't attempt to do any
                     # deltas with the other file.
                     if copyfrom:
-                        p1 = nullid
+                        p1 = self.repo.nullid
 
                     if chainlen < maxchainlen:
                         # Record this child as the delta base for its parents.
@@ -700,9 +697,9 @@
                         # many children, and this will only choose the last one.
                         # TODO: record all children and try all deltas to find
                         # best
-                        if p1 != nullid:
+                        if p1 != self.repo.nullid:
                             deltabases[p1] = (node, chainlen + 1)
-                        if p2 != nullid:
+                        if p2 != self.repo.nullid:
                             deltabases[p2] = (node, chainlen + 1)
 
             # experimental config: repack.chainorphansbysize
@@ -719,7 +716,7 @@
                 # TODO: Optimize the deltachain fetching. Since we're
                 # iterating over the different version of the file, we may
                 # be fetching the same deltachain over and over again.
-                if deltabase != nullid:
+                if deltabase != self.repo.nullid:
                     deltaentry = self.data.getdelta(filename, node)
                     delta, deltabasename, origdeltabase, meta = deltaentry
                     size = meta.get(constants.METAKEYSIZE)
@@ -791,9 +788,9 @@
                     # If copyfrom == filename, it means the copy history
                     # went to some other file, then came back to this one, so we
                     # should continue processing it.
-                    if p1 != nullid and copyfrom != filename:
+                    if p1 != self.repo.nullid and copyfrom != filename:
                         dontprocess.add(p1)
-                    if p2 != nullid:
+                    if p2 != self.repo.nullid:
                         dontprocess.add(p2)
                     continue
 
@@ -814,9 +811,9 @@
         def parentfunc(node):
             p1, p2, linknode, copyfrom = ancestors[node]
             parents = []
-            if p1 != nullid:
+            if p1 != self.repo.nullid:
                 parents.append(p1)
-            if p2 != nullid:
+            if p2 != self.repo.nullid:
                 parents.append(p2)
             return parents
 
--- a/hgext/remotefilelog/shallowbundle.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/shallowbundle.py	Mon May 17 15:05:24 2021 +0200
@@ -7,7 +7,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
 from mercurial import (
     bundlerepo,
     changegroup,
@@ -143,7 +143,7 @@
 
     def nodechunk(self, revlog, node, prevnode, linknode):
         prefix = b''
-        if prevnode == nullid:
+        if prevnode == revlog.nullid:
             delta = revlog.rawdata(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
@@ -245,7 +245,7 @@
     processed = set()
 
     def available(f, node, depf, depnode):
-        if depnode != nullid and (depf, depnode) not in processed:
+        if depnode != repo.nullid and (depf, depnode) not in processed:
             if not (depf, depnode) in revisiondatas:
                 # It's not in the changegroup, assume it's already
                 # in the repo
@@ -267,7 +267,7 @@
         dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
 
         for dependent in dependents:
-            if dependent == nullid or (f, dependent) in revisiondatas:
+            if dependent == repo.nullid or (f, dependent) in revisiondatas:
                 continue
             prefetchfiles.append((f, hex(dependent)))
 
@@ -306,7 +306,7 @@
                 continue
 
         for p in [p1, p2]:
-            if p != nullid:
+            if p != repo.nullid:
                 if not available(f, node, f, p):
                     continue
 
--- a/hgext/remotefilelog/shallowrepo.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/remotefilelog/shallowrepo.py	Mon May 17 15:05:24 2021 +0200
@@ -9,7 +9,7 @@
 import os
 
 from mercurial.i18n import _
-from mercurial.node import hex, nullid, nullrev
+from mercurial.node import hex, nullrev
 from mercurial import (
     encoding,
     error,
@@ -206,8 +206,8 @@
                 m1 = ctx.p1().manifest()
                 files = []
                 for f in ctx.modified() + ctx.added():
-                    fparent1 = m1.get(f, nullid)
-                    if fparent1 != nullid:
+                    fparent1 = m1.get(f, self.nullid)
+                    if fparent1 != self.nullid:
                         files.append((f, hex(fparent1)))
                 self.fileservice.prefetch(files)
             return super(shallowrepository, self).commitctx(
--- a/hgext/sqlitestore.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/sqlitestore.py	Mon May 17 15:05:24 2021 +0200
@@ -52,7 +52,6 @@
 
 from mercurial.i18n import _
 from mercurial.node import (
-    nullid,
     nullrev,
     sha1nodeconstants,
     short,
@@ -290,6 +289,7 @@
     revision = attr.ib()
     delta = attr.ib()
     sidedata = attr.ib()
+    protocol_flags = attr.ib()
     linknode = attr.ib(default=None)
 
 
@@ -366,12 +366,12 @@
                 )
 
             if p1rev == nullrev:
-                p1node = nullid
+                p1node = sha1nodeconstants.nullid
             else:
                 p1node = self._revtonode[p1rev]
 
             if p2rev == nullrev:
-                p2node = nullid
+                p2node = sha1nodeconstants.nullid
             else:
                 p2node = self._revtonode[p2rev]
 
@@ -400,7 +400,7 @@
         return iter(pycompat.xrange(len(self._revisions)))
 
     def hasnode(self, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             return False
 
         return node in self._nodetorev
@@ -411,8 +411,8 @@
         )
 
     def parents(self, node):
-        if node == nullid:
-            return nullid, nullid
+        if node == sha1nodeconstants.nullid:
+            return sha1nodeconstants.nullid, sha1nodeconstants.nullid
 
         if node not in self._revisions:
             raise error.LookupError(node, self._path, _(b'no node'))
@@ -431,7 +431,7 @@
         return entry.p1rev, entry.p2rev
 
     def rev(self, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             return nullrev
 
         if node not in self._nodetorev:
@@ -441,7 +441,7 @@
 
     def node(self, rev):
         if rev == nullrev:
-            return nullid
+            return sha1nodeconstants.nullid
 
         if rev not in self._revtonode:
             raise IndexError(rev)
@@ -485,7 +485,7 @@
     def heads(self, start=None, stop=None):
         if start is None and stop is None:
             if not len(self):
-                return [nullid]
+                return [sha1nodeconstants.nullid]
 
         startrev = self.rev(start) if start is not None else nullrev
         stoprevs = {self.rev(n) for n in stop or []}
@@ -529,7 +529,7 @@
         return len(self.revision(node))
 
     def revision(self, node, raw=False, _verifyhash=True):
-        if node in (nullid, nullrev):
+        if node in (sha1nodeconstants.nullid, nullrev):
             return b''
 
         if isinstance(node, int):
@@ -596,7 +596,7 @@
                 b'unhandled value for nodesorder: %s' % nodesorder
             )
 
-        nodes = [n for n in nodes if n != nullid]
+        nodes = [n for n in nodes if n != sha1nodeconstants.nullid]
 
         if not nodes:
             return
@@ -705,12 +705,12 @@
                 raise SQLiteStoreError(b'unhandled revision flag')
 
             if maybemissingparents:
-                if p1 != nullid and not self.hasnode(p1):
-                    p1 = nullid
+                if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
+                    p1 = sha1nodeconstants.nullid
                     storeflags |= FLAG_MISSING_P1
 
-                if p2 != nullid and not self.hasnode(p2):
-                    p2 = nullid
+                if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
+                    p2 = sha1nodeconstants.nullid
                     storeflags |= FLAG_MISSING_P2
 
             baserev = self.rev(deltabase)
@@ -736,7 +736,10 @@
                 # Possibly reset parents to make them proper.
                 entry = self._revisions[node]
 
-                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
+                if (
+                    entry.flags & FLAG_MISSING_P1
+                    and p1 != sha1nodeconstants.nullid
+                ):
                     entry.p1node = p1
                     entry.p1rev = self._nodetorev[p1]
                     entry.flags &= ~FLAG_MISSING_P1
@@ -746,7 +749,10 @@
                         (self._nodetorev[p1], entry.flags, entry.rid),
                     )
 
-                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
+                if (
+                    entry.flags & FLAG_MISSING_P2
+                    and p2 != sha1nodeconstants.nullid
+                ):
                     entry.p2node = p2
                     entry.p2rev = self._nodetorev[p2]
                     entry.flags &= ~FLAG_MISSING_P2
@@ -761,7 +767,7 @@
                 empty = False
                 continue
 
-            if deltabase == nullid:
+            if deltabase == sha1nodeconstants.nullid:
                 text = mdiff.patch(b'', delta)
                 storedelta = None
             else:
@@ -1012,7 +1018,7 @@
             assert revisiondata is not None
             deltabase = p1
 
-            if deltabase == nullid:
+            if deltabase == sha1nodeconstants.nullid:
                 delta = revisiondata
             else:
                 delta = mdiff.textdiff(
@@ -1021,7 +1027,7 @@
 
         # File index stores a pointer to its delta and the parent delta.
         # The parent delta is stored via a pointer to the fileindex PK.
-        if deltabase == nullid:
+        if deltabase == sha1nodeconstants.nullid:
             baseid = None
         else:
             baseid = self._revisions[deltabase].rid
@@ -1055,12 +1061,12 @@
 
         rev = len(self)
 
-        if p1 == nullid:
+        if p1 == sha1nodeconstants.nullid:
             p1rev = nullrev
         else:
             p1rev = self._nodetorev[p1]
 
-        if p2 == nullid:
+        if p2 == sha1nodeconstants.nullid:
             p2rev = nullrev
         else:
             p2rev = self._nodetorev[p2]
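
Editor's note: the sqlitestore delta object grows a `protocol_flags`
attribute (see the attrs hunk above); changegroup v4 later uses it to signal
an attached sidedata chunk. A hedged construction sketch using that attrs
class: only `revision`, `delta`, `sidedata`, `protocol_flags` and `linknode`
are visible in this diff, the leading field names are assumed from the
irevisiondelta interface:

    def fulltext_delta(repo, node, p1, p2, linknode, text):
        # protocol_flags stays 0 at the store level; the changegroup packer
        # sets CG_FLAG_SIDEDATA on it when sidedata follows on the wire.
        return revisiondelta(
            node=node,
            p1node=p1,
            p2node=p2,
            basenode=repo.nullid,
            flags=0,
            baserevisionsize=None,
            revision=text,
            delta=None,
            sidedata=None,
            protocol_flags=0,
            linknode=linknode,
        )
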
--- a/hgext/transplant.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/transplant.py	Mon May 17 15:05:24 2021 +0200
@@ -22,7 +22,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -134,6 +133,7 @@
 class transplanter(object):
     def __init__(self, ui, repo, opts):
         self.ui = ui
+        self.repo = repo
         self.path = repo.vfs.join(b'transplant')
         self.opener = vfsmod.vfs(self.path)
         self.transplants = transplants(
@@ -221,7 +221,7 @@
                         exchange.pull(repo, source.peer(), heads=[node])
 
                 skipmerge = False
-                if parents[1] != nullid:
+                if parents[1] != repo.nullid:
                     if not opts.get(b'parent'):
                         self.ui.note(
                             _(b'skipping merge changeset %d:%s\n')
@@ -516,7 +516,7 @@
     def parselog(self, fp):
         parents = []
         message = []
-        node = nullid
+        node = self.repo.nullid
         inmsg = False
         user = None
         date = None
@@ -568,7 +568,7 @@
         def matchfn(node):
             if self.applied(repo, node, root):
                 return False
-            if source.changelog.parents(node)[1] != nullid:
+            if source.changelog.parents(node)[1] != repo.nullid:
                 return False
             extra = source.changelog.read(node)[5]
             cnode = extra.get(b'transplant_source')
@@ -804,7 +804,7 @@
     tp = transplanter(ui, repo, opts)
 
     p1 = repo.dirstate.p1()
-    if len(repo) > 0 and p1 == nullid:
+    if len(repo) > 0 and p1 == repo.nullid:
         raise error.Abort(_(b'no revision checked out'))
     if opts.get(b'continue'):
         if not tp.canresume():
--- a/hgext/uncommit.py	Fri May 07 10:39:58 2021 +0200
+++ b/hgext/uncommit.py	Mon May 17 15:05:24 2021 +0200
@@ -20,7 +20,6 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial.node import nullid
 
 from mercurial import (
     cmdutil,
@@ -113,7 +112,7 @@
 
     new = context.memctx(
         repo,
-        parents=[base.node(), nullid],
+        parents=[base.node(), repo.nullid],
         text=message,
         files=files,
         filectxfn=filectxfn,
--- a/mercurial/bookmarks.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/bookmarks.py	Mon May 17 15:05:24 2021 +0200
@@ -15,7 +15,6 @@
     bin,
     hex,
     short,
-    wdirid,
 )
 from .pycompat import getattr
 from . import (
@@ -601,11 +600,12 @@
     # if an @pathalias already exists, we overwrite (update) it
     if path.startswith(b"file:"):
         path = urlutil.url(path).path
-    for p, u in ui.configitems(b"paths"):
-        if u.startswith(b"file:"):
-            u = urlutil.url(u).path
-        if path == u:
-            return b'%s@%s' % (b, p)
+    for name, p in urlutil.list_paths(ui):
+        loc = p.rawloc
+        if loc.startswith(b"file:"):
+            loc = urlutil.url(loc).path
+        if path == loc:
+            return b'%s@%s' % (b, name)
 
     # assign a unique "@number" suffix newly
     for x in range(1, 100):
@@ -642,7 +642,7 @@
     binarydata = []
     for book, node in bookmarks:
         if not node:  # None or ''
-            node = wdirid
+            node = repo.nodeconstants.wdirid
         binarydata.append(_binaryentry.pack(node, len(book)))
         binarydata.append(book)
     return b''.join(binarydata)
@@ -674,7 +674,7 @@
         if len(bookmark) < length:
             if entry:
                 raise error.Abort(_(b'bad bookmark stream'))
-        if node == wdirid:
+        if node == repo.nodeconstants.wdirid:
             node = None
         books.append((bookmark, node))
     return books
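
Editor's note: in the binary bookmark stream, an absent node (a deletion) is
encoded as the working-directory id so that every entry keeps a fixed-width
node slot; that id now comes from `repo.nodeconstants`. A round-trip sketch,
assuming `_binaryentry` is `struct.Struct('>20sH')` as in current releases:

    import struct

    binaryentry = struct.Struct(b'>20sH')  # assumed: 20-byte node + name length

    def encode_entry(repo, book, node):
        if not node:  # None or b'' means "bookmark deleted"
            node = repo.nodeconstants.wdirid
        return binaryentry.pack(node, len(book)) + book

    def decode_entry(repo, data):
        node, length = binaryentry.unpack(data[:binaryentry.size])
        book = data[binaryentry.size:binaryentry.size + length]
        if node == repo.nodeconstants.wdirid:
            node = None
        return book, node
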
--- a/mercurial/branchmap.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/branchmap.py	Mon May 17 15:05:24 2021 +0200
@@ -12,7 +12,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from . import (
@@ -189,7 +188,7 @@
         self,
         repo,
         entries=(),
-        tipnode=nullid,
+        tipnode=None,
         tiprev=nullrev,
         filteredhash=None,
         closednodes=None,
@@ -200,7 +199,10 @@
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog"""
         self._repo = repo
-        self.tipnode = tipnode
+        if tipnode is None:
+            self.tipnode = repo.nullid
+        else:
+            self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
@@ -536,7 +538,7 @@
 
         if not self.validfor(repo):
             # cache key are not valid anymore
-            self.tipnode = nullid
+            self.tipnode = repo.nullid
             self.tiprev = nullrev
             for heads in self.iterheads():
                 tiprev = max(cl.rev(node) for node in heads)
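
Editor's note: `tipnode` can no longer default to a module-level constant
because the right null node is only known once a repository is in hand,
hence the None sentinel above. The general shape of the pattern, as a
sketch:

    class branchcache(object):
        def __init__(self, repo, tipnode=None):
            # A default cannot reference another argument, so None stands
            # in for "this repository's null node" until we can resolve it.
            self._repo = repo
            self.tipnode = repo.nullid if tipnode is None else tipnode
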
--- a/mercurial/bundle2.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/bundle2.py	Mon May 17 15:05:24 2021 +0200
@@ -158,7 +158,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     short,
 )
 from . import (
@@ -181,6 +180,7 @@
     stringutil,
     urlutil,
 )
+from .interfaces import repository
 
 urlerr = util.urlerr
 urlreq = util.urlreq
@@ -1730,8 +1730,8 @@
             part.addparam(
                 b'targetphase', b'%d' % phases.secret, mandatory=False
             )
-        if b'exp-sidedata-flag' in repo.requirements:
-            part.addparam(b'exp-sidedata', b'1')
+    if repository.REPO_FEATURE_SIDE_DATA in repo.features:
+        part.addparam(b'exp-sidedata', b'1')
 
     if opts.get(b'streamv2', False):
         addpartbundlestream2(bundler, repo, stream=True)
@@ -2014,13 +2014,6 @@
         )
         scmutil.writereporequirements(op.repo)
 
-    bundlesidedata = bool(b'exp-sidedata' in inpart.params)
-    reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
-    if reposidedata and not bundlesidedata:
-        msg = b"repository is using sidedata but the bundle source do not"
-        hint = b'this is currently unsupported'
-        raise error.Abort(msg, hint=hint)
-
     extrakwargs = {}
     targetphase = inpart.params.get(b'targetphase')
     if targetphase is not None:
@@ -2576,7 +2569,7 @@
             fullnodes=commonnodes,
         )
         cgdata = packer.generate(
-            {nullid},
+            {repo.nullid},
             list(commonnodes),
             False,
             b'narrow_widen',
@@ -2587,9 +2580,9 @@
         part.addparam(b'version', cgversion)
         if scmutil.istreemanifest(repo):
             part.addparam(b'treemanifest', b'1')
-        if b'exp-sidedata-flag' in repo.requirements:
-            part.addparam(b'exp-sidedata', b'1')
-            wanted = format_remote_wanted_sidedata(repo)
-            part.addparam(b'exp-wanted-sidedata', wanted)
+    if repository.REPO_FEATURE_SIDE_DATA in repo.features:
+        part.addparam(b'exp-sidedata', b'1')
+        wanted = format_remote_wanted_sidedata(repo)
+        part.addparam(b'exp-wanted-sidedata', wanted)
 
     return bundler
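
Editor's note: sidedata support is now advertised as a repository feature
instead of an `exp-sidedata-flag` entry in `.hg/requires`, so bundle2 asks
the repo object directly. A hedged sketch of the check; the helper name is
illustrative:

    from mercurial.interfaces import repository

    def add_sidedata_params(part, repo):
        # Features are populated by the storage backend; requirements
        # remain a persistence concern and are no longer probed here.
        if repository.REPO_FEATURE_SIDE_DATA in repo.features:
            part.addparam(b'exp-sidedata', b'1')
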
--- a/mercurial/bundlerepo.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/bundlerepo.py	Mon May 17 15:05:24 2021 +0200
@@ -19,7 +19,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
 )
 
@@ -47,9 +46,13 @@
     urlutil,
 )
 
+from .revlogutils import (
+    constants as revlog_constants,
+)
+
 
 class bundlerevlog(revlog.revlog):
-    def __init__(self, opener, indexfile, cgunpacker, linkmapper):
+    def __init__(self, opener, target, radix, cgunpacker, linkmapper):
         # How it works:
         # To retrieve a revision, we need to know the offset of the revision in
         # the bundle (an unbundle object). We store this offset in the index
@@ -58,7 +61,7 @@
         # To differentiate a rev in the bundle from a rev in the revlog, we
         # check revision against repotiprev.
         opener = vfsmod.readonlyvfs(opener)
-        revlog.revlog.__init__(self, opener, indexfile)
+        revlog.revlog.__init__(self, opener, target=target, radix=radix)
         self.bundle = cgunpacker
         n = len(self)
         self.repotiprev = n - 1
@@ -81,16 +84,16 @@
             for p in (p1, p2):
                 if not self.index.has_node(p):
                     raise error.LookupError(
-                        p, self.indexfile, _(b"unknown parent")
+                        p, self.display_id, _(b"unknown parent")
                     )
 
             if not self.index.has_node(deltabase):
                 raise LookupError(
-                    deltabase, self.indexfile, _(b'unknown delta base')
+                    deltabase, self.display_id, _(b'unknown delta base')
                 )
 
             baserev = self.rev(deltabase)
-            # start, size, full unc. size, base (unused), link, p1, p2, node
+            # start, size, full unc. size, base (unused), link, p1, p2, node,
+            # sidedata_offset (unused), sidedata_size (unused)
             e = (
                 revlog.offset_type(start, flags),
                 size,
@@ -100,6 +103,8 @@
                 self.rev(p1),
                 self.rev(p2),
                 node,
+                0,
+                0,
             )
             self.index.append(e)
             self.bundlerevs.add(n)
@@ -172,7 +177,12 @@
         changelog.changelog.__init__(self, opener)
         linkmapper = lambda x: x
         bundlerevlog.__init__(
-            self, opener, self.indexfile, cgunpacker, linkmapper
+            self,
+            opener,
+            (revlog_constants.KIND_CHANGELOG, None),
+            self.radix,
+            cgunpacker,
+            linkmapper,
         )
 
 
@@ -188,7 +198,12 @@
     ):
         manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
         bundlerevlog.__init__(
-            self, opener, self.indexfile, cgunpacker, linkmapper
+            self,
+            opener,
+            (revlog_constants.KIND_MANIFESTLOG, dir),
+            self._revlog.radix,
+            cgunpacker,
+            linkmapper,
         )
         if dirlogstarts is None:
             dirlogstarts = {}
@@ -215,7 +230,12 @@
     def __init__(self, opener, path, cgunpacker, linkmapper):
         filelog.filelog.__init__(self, opener, path)
         self._revlog = bundlerevlog(
-            opener, self.indexfile, cgunpacker, linkmapper
+            opener,
+            # XXX should use the unencoded path
+            target=(revlog_constants.KIND_FILELOG, path),
+            radix=self._revlog.radix,
+            cgunpacker=cgunpacker,
+            linkmapper=linkmapper,
         )
 
 
@@ -447,7 +467,9 @@
         return encoding.getcwd()  # always outside the repo
 
     # Check if parents exist in localrepo before setting
-    def setparents(self, p1, p2=nullid):
+    def setparents(self, p1, p2=None):
+        if p2 is None:
+            p2 = self.nullid
         p1rev = self.changelog.rev(p1)
         p2rev = self.changelog.rev(p2)
         msg = _(b"setting parent to node %s that only exists in the bundle\n")
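
Editor's note: bundlerevlog now uses the new-style revlog identification: a
`(kind, entity)` target describing what the revlog stores plus a `radix`
naming its files, rather than a literal index file name. A hedged sketch of
opening a filelog-shaped revlog under this scheme:

    from mercurial import revlog
    from mercurial.revlogutils import constants as revlog_constants

    def open_file_revlog(opener, path, radix):
        # KIND_FILELOG tags the revlog's role; radix is the store path
        # stem from which the .i/.d file names are derived.
        return revlog.revlog(
            opener,
            target=(revlog_constants.KIND_FILELOG, path),
            radix=radix,
        )
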
--- a/mercurial/cext/manifest.c	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/cext/manifest.c	Mon May 17 15:05:24 2021 +0200
@@ -28,6 +28,7 @@
 typedef struct {
 	PyObject_HEAD
 	PyObject *pydata;
+	Py_ssize_t nodelen;
 	line *lines;
 	int numlines; /* number of line entries */
 	int livelines; /* number of non-deleted lines */
@@ -49,12 +50,11 @@
 }
 
 /* get the node value of a single line */
-static PyObject *nodeof(line *l, char *flag)
+static PyObject *nodeof(Py_ssize_t nodelen, line *l, char *flag)
 {
 	char *s = l->start;
 	Py_ssize_t llen = pathlen(l);
 	Py_ssize_t hlen = l->len - llen - 2;
-	Py_ssize_t hlen_raw;
 	PyObject *hash;
 	if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */
 		PyErr_SetString(PyExc_ValueError, "manifest line too short");
@@ -73,36 +73,29 @@
 		break;
 	}
 
-	switch (hlen) {
-	case 40: /* sha1 */
-		hlen_raw = 20;
-		break;
-	case 64: /* new hash */
-		hlen_raw = 32;
-		break;
-	default:
+	if (hlen != 2 * nodelen) {
 		PyErr_SetString(PyExc_ValueError, "invalid node length in manifest");
 		return NULL;
 	}
-	hash = unhexlify(s + llen + 1, hlen_raw * 2);
+	hash = unhexlify(s + llen + 1, nodelen * 2);
 	if (!hash) {
 		return NULL;
 	}
 	if (l->hash_suffix != '\0') {
 		char newhash[33];
-		memcpy(newhash, PyBytes_AsString(hash), hlen_raw);
+		memcpy(newhash, PyBytes_AsString(hash), nodelen);
 		Py_DECREF(hash);
-		newhash[hlen_raw] = l->hash_suffix;
-		hash = PyBytes_FromStringAndSize(newhash, hlen_raw+1);
+		newhash[nodelen] = l->hash_suffix;
+		hash = PyBytes_FromStringAndSize(newhash, nodelen + 1);
 	}
 	return hash;
 }
 
 /* get the node hash and flags of a line as a tuple */
-static PyObject *hashflags(line *l)
+static PyObject *hashflags(Py_ssize_t nodelen, line *l)
 {
 	char flag;
-	PyObject *hash = nodeof(l, &flag);
+	PyObject *hash = nodeof(nodelen, l, &flag);
 	PyObject *flags;
 	PyObject *tup;
 
@@ -190,17 +183,23 @@
 static int lazymanifest_init(lazymanifest *self, PyObject *args)
 {
 	char *data;
-	Py_ssize_t len;
+	Py_ssize_t nodelen, len;
 	int err, ret;
 	PyObject *pydata;
 
 	lazymanifest_init_early(self);
-	if (!PyArg_ParseTuple(args, "S", &pydata)) {
+	if (!PyArg_ParseTuple(args, "nS", &nodelen, &pydata)) {
 		return -1;
 	}
-	err = PyBytes_AsStringAndSize(pydata, &data, &len);
+	if (nodelen != 20 && nodelen != 32) {
+		/* See fixed buffer in nodeof */
+		PyErr_SetString(PyExc_ValueError, "Unsupported node length");
+		return -1;
+	}
+	self->nodelen = nodelen;
+	self->dirty = false;
 
-	self->dirty = false;
+	err = PyBytes_AsStringAndSize(pydata, &data, &len);
 	if (err == -1)
 		return -1;
 	self->pydata = pydata;
@@ -291,17 +290,18 @@
 
 static PyObject *lmiter_iterentriesnext(PyObject *o)
 {
+	lmIter *self = (lmIter *)o;
 	Py_ssize_t pl;
 	line *l;
 	char flag;
 	PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL;
-	l = lmiter_nextline((lmIter *)o);
+	l = lmiter_nextline(self);
 	if (!l) {
 		goto done;
 	}
 	pl = pathlen(l);
 	path = PyBytes_FromStringAndSize(l->start, pl);
-	hash = nodeof(l, &flag);
+	hash = nodeof(self->m->nodelen, l, &flag);
 	if (!path || !hash) {
 		goto done;
 	}
@@ -471,7 +471,7 @@
 		PyErr_Format(PyExc_KeyError, "No such manifest entry.");
 		return NULL;
 	}
-	return hashflags(hit);
+	return hashflags(self->nodelen, hit);
 }
 
 static int lazymanifest_delitem(lazymanifest *self, PyObject *key)
@@ -568,13 +568,13 @@
 	pyhash = PyTuple_GetItem(value, 0);
 	if (!PyBytes_Check(pyhash)) {
 		PyErr_Format(PyExc_TypeError,
-			     "node must be a 20 or 32 bytes string");
+			     "node must be a %zi-byte string", self->nodelen);
 		return -1;
 	}
 	hlen = PyBytes_Size(pyhash);
-	if (hlen != 20 && hlen != 32) {
+	if (hlen != self->nodelen) {
 		PyErr_Format(PyExc_TypeError,
-			     "node must be a 20 or 32 bytes string");
+			     "node must be a %zi-byte string", self->nodelen);
 		return -1;
 	}
 	hash = PyBytes_AsString(pyhash);
@@ -739,6 +739,7 @@
 		goto nomem;
 	}
 	lazymanifest_init_early(copy);
+	copy->nodelen = self->nodelen;
 	copy->numlines = self->numlines;
 	copy->livelines = self->livelines;
 	copy->dirty = false;
@@ -777,6 +778,7 @@
 		goto nomem;
 	}
 	lazymanifest_init_early(copy);
+	copy->nodelen = self->nodelen;
 	copy->dirty = true;
 	copy->lines = malloc(self->maxlines * sizeof(line));
 	if (!copy->lines) {
@@ -872,7 +874,7 @@
 		if (!key)
 			goto nomem;
 		if (result < 0) {
-			PyObject *l = hashflags(left);
+			PyObject *l = hashflags(self->nodelen, left);
 			if (!l) {
 				goto nomem;
 			}
@@ -885,7 +887,7 @@
 			Py_DECREF(outer);
 			sneedle++;
 		} else if (result > 0) {
-			PyObject *r = hashflags(right);
+			PyObject *r = hashflags(self->nodelen, right);
 			if (!r) {
 				goto nomem;
 			}
@@ -902,12 +904,12 @@
 			if (left->len != right->len
 			    || memcmp(left->start, right->start, left->len)
 			    || left->hash_suffix != right->hash_suffix) {
-				PyObject *l = hashflags(left);
+				PyObject *l = hashflags(self->nodelen, left);
 				PyObject *r;
 				if (!l) {
 					goto nomem;
 				}
-				r = hashflags(right);
+				r = hashflags(self->nodelen, right);
 				if (!r) {
 					Py_DECREF(l);
 					goto nomem;
--- a/mercurial/cext/parsers.c	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/cext/parsers.c	Mon May 17 15:05:24 2021 +0200
@@ -668,7 +668,7 @@
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 17;
+static const int version = 18;
 
 static void module_init(PyObject *mod)
 {
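
Editor's note: bumping the C module version above invalidates stale builds:
the pure Python side refuses to use a compiled `parsers` module whose
version does not match. A minimal sketch of such a guard, assuming the
module exports the constant as `parsers.version` the way `mercurial.policy`
expects:

    from mercurial.cext import parsers

    def check_cext():
        # a mismatch means the extension was built from another source tree
        if getattr(parsers, 'version', None) != 18:
            raise RuntimeError('C extensions are out of date, rebuild them')
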
--- a/mercurial/cext/parsers.pyi	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/cext/parsers.pyi	Mon May 17 15:05:24 2021 +0200
@@ -29,7 +29,7 @@
 
 # From manifest.c
 class lazymanifest:
-    def __init__(self, data: bytes): ...
+    def __init__(self, nodelen: int, data: bytes): ...
     def __iter__(self) -> Iterator[bytes]: ...
 
     def __len__(self) -> int: ...
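
Editor's note: matching the stub above, `lazymanifest` callers must now pass
the node length up front (20 for SHA-1; 32 is reserved for a wider hash). A
usage sketch with a one-entry manifest, assuming the C extension is built:

    from mercurial.cext import parsers

    data = b'foo\x00' + b'a' * 40 + b'\n'  # <path> NUL <hex node> [flag] LF
    m = parsers.lazymanifest(20, data)     # nodelen first, then the raw text
    node, flags = m[b'foo']                # 20-byte binary node, flag bytes
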
--- a/mercurial/cext/revlog.c	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/cext/revlog.c	Mon May 17 15:05:24 2021 +0200
@@ -99,7 +99,10 @@
 	int ntlookups;          /* # lookups */
 	int ntmisses;           /* # lookups that miss the cache */
 	int inlined;
-	long hdrsize; /* size of index headers. Differs in v1 v.s. v2 format */
+	long entry_size;     /* size of an index entry; differs between the
+	                        v1 and v2 formats */
+	char format_version; /* format version of the index (v1 or v2) */
 };
 
 static Py_ssize_t index_length(const indexObject *self)
@@ -115,18 +118,19 @@
 static int index_find_node(indexObject *self, const char *node);
 
 #if LONG_MAX == 0x7fffffffL
-static const char *const v1_tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
-static const char *const v2_tuple_format = PY23("Kiiiiiis#Ki", "Kiiiiiiy#Ki");
+static const char *const tuple_format = PY23("Kiiiiiis#Ki", "Kiiiiiiy#Ki");
 #else
-static const char *const v1_tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
-static const char *const v2_tuple_format = PY23("kiiiiiis#ki", "kiiiiiiy#ki");
+static const char *const tuple_format = PY23("kiiiiiis#ki", "kiiiiiiy#ki");
 #endif
 
 /* A RevlogNG v1 index entry is 64 bytes long. */
-static const long v1_hdrsize = 64;
+static const long v1_entry_size = 64;
 
 /* A Revlogv2 index entry is 96 bytes long. */
-static const long v2_hdrsize = 96;
+static const long v2_entry_size = 96;
+
+static const long format_v1 = 1; /* Internal only, could be any number */
+static const long format_v2 = 2; /* Internal only, could be any number */
 
 static void raise_revlog_error(void)
 {
@@ -164,7 +168,7 @@
 static const char *index_deref(indexObject *self, Py_ssize_t pos)
 {
 	if (pos >= self->length)
-		return self->added + (pos - self->length) * self->hdrsize;
+		return self->added + (pos - self->length) * self->entry_size;
 
 	if (self->inlined && pos > 0) {
 		if (self->offsets == NULL) {
@@ -181,7 +185,7 @@
 		return self->offsets[pos];
 	}
 
-	return (const char *)(self->buf.buf) + pos * self->hdrsize;
+	return (const char *)(self->buf.buf) + pos * self->entry_size;
 }
 
 /*
@@ -328,19 +332,58 @@
 	parent_2 = getbe32(data + 28);
 	c_node_id = data + 32;
 
-	if (self->hdrsize == v1_hdrsize) {
-		return Py_BuildValue(v1_tuple_format, offset_flags, comp_len,
-		                     uncomp_len, base_rev, link_rev, parent_1,
-		                     parent_2, c_node_id, self->nodelen);
+	if (self->entry_size == v1_entry_size) {
+		sidedata_offset = 0;
+		sidedata_comp_len = 0;
 	} else {
 		sidedata_offset = getbe64(data + 64);
 		sidedata_comp_len = getbe32(data + 72);
-
-		return Py_BuildValue(v2_tuple_format, offset_flags, comp_len,
-		                     uncomp_len, base_rev, link_rev, parent_1,
-		                     parent_2, c_node_id, self->nodelen,
-		                     sidedata_offset, sidedata_comp_len);
+	}
+
+	return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
+	                     base_rev, link_rev, parent_1, parent_2, c_node_id,
+	                     self->nodelen, sidedata_offset, sidedata_comp_len);
+}
+/*
+ * Pack header information in binary
+ */
+static PyObject *index_pack_header(indexObject *self, PyObject *args)
+{
+	int header;
+	char out[4];
+	if (!PyArg_ParseTuple(args, "I", &header)) {
+		return NULL;
 	}
+	putbe32(header, out);
+	return PyBytes_FromStringAndSize(out, 4);
+}
+/*
+ * Return the raw binary string representing a revision
+ */
+static PyObject *index_entry_binary(indexObject *self, PyObject *value)
+{
+	long rev;
+	const char *data;
+	Py_ssize_t length = index_length(self);
+
+	if (!pylong_to_long(value, &rev)) {
+		return NULL;
+	}
+	if (rev < 0 || rev >= length) {
+		PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
+		             rev);
+		return NULL;
+	}
+
+	data = index_deref(self, rev);
+	if (data == NULL)
+		return NULL;
+	if (rev == 0) {
+		/* the header is eating the start of the first entry */
+		return PyBytes_FromStringAndSize(data + 4,
+		                                 self->entry_size - 4);
+	}
+	return PyBytes_FromStringAndSize(data, self->entry_size);
 }
 
 /*
@@ -397,23 +440,12 @@
 	const char *c_node_id;
 	char *data;
 
-	if (self->hdrsize == v1_hdrsize) {
-		if (!PyArg_ParseTuple(obj, v1_tuple_format, &offset_flags,
-		                      &comp_len, &uncomp_len, &base_rev,
-		                      &link_rev, &parent_1, &parent_2,
-		                      &c_node_id, &c_node_id_len)) {
-			PyErr_SetString(PyExc_TypeError, "8-tuple required");
-			return NULL;
-		}
-	} else {
-		if (!PyArg_ParseTuple(obj, v2_tuple_format, &offset_flags,
-		                      &comp_len, &uncomp_len, &base_rev,
-		                      &link_rev, &parent_1, &parent_2,
-		                      &c_node_id, &c_node_id_len,
-		                      &sidedata_offset, &sidedata_comp_len)) {
-			PyErr_SetString(PyExc_TypeError, "10-tuple required");
-			return NULL;
-		}
+	if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len,
+	                      &uncomp_len, &base_rev, &link_rev, &parent_1,
+	                      &parent_2, &c_node_id, &c_node_id_len,
+	                      &sidedata_offset, &sidedata_comp_len)) {
+		PyErr_SetString(PyExc_TypeError, "10-tuple required");
+		return NULL;
 	}
 
 	if (c_node_id_len != self->nodelen) {
@@ -424,15 +456,15 @@
 	if (self->new_length == self->added_length) {
 		size_t new_added_length =
 		    self->added_length ? self->added_length * 2 : 4096;
-		void *new_added = PyMem_Realloc(self->added, new_added_length *
-		                                                 self->hdrsize);
+		void *new_added = PyMem_Realloc(
+		    self->added, new_added_length * self->entry_size);
 		if (!new_added)
 			return PyErr_NoMemory();
 		self->added = new_added;
 		self->added_length = new_added_length;
 	}
 	rev = self->length + self->new_length;
-	data = self->added + self->hdrsize * self->new_length++;
+	data = self->added + self->entry_size * self->new_length++;
 	putbe32(offset_flags >> 32, data);
 	putbe32(offset_flags & 0xffffffffU, data + 4);
 	putbe32(comp_len, data + 8);
@@ -444,11 +476,11 @@
 	memcpy(data + 32, c_node_id, c_node_id_len);
 	/* Padding since SHA-1 is only 20 bytes for now */
 	memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);
-	if (self->hdrsize != v1_hdrsize) {
+	if (self->format_version == format_v2) {
 		putbe64(sidedata_offset, data + 64);
 		putbe32(sidedata_comp_len, data + 72);
 		/* Padding for 96 bytes alignment */
-		memset(data + 76, 0, self->hdrsize - 76);
+		memset(data + 76, 0, self->entry_size - 76);
 	}
 
 	if (self->ntinitialized)
@@ -463,17 +495,17 @@
    inside the transaction that creates the given revision. */
 static PyObject *index_replace_sidedata_info(indexObject *self, PyObject *args)
 {
-	uint64_t sidedata_offset;
+	uint64_t offset_flags, sidedata_offset;
 	int rev;
 	Py_ssize_t sidedata_comp_len;
 	char *data;
 #if LONG_MAX == 0x7fffffffL
-	const char *const sidedata_format = PY23("nKi", "nKi");
+	const char *const sidedata_format = PY23("nKiK", "nKiK");
 #else
-	const char *const sidedata_format = PY23("nki", "nki");
+	const char *const sidedata_format = PY23("nkik", "nkik");
 #endif
 
-	if (self->hdrsize == v1_hdrsize || self->inlined) {
+	if (self->entry_size == v1_entry_size || self->inlined) {
 		/*
 		 There is a bug in the transaction handling when going from an
 	   inline revlog to a separate index and data file. Turn it off until
@@ -485,7 +517,7 @@
 	}
 
 	if (!PyArg_ParseTuple(args, sidedata_format, &rev, &sidedata_offset,
-	                      &sidedata_comp_len))
+	                      &sidedata_comp_len, &offset_flags))
 		return NULL;
 
 	if (rev < 0 || rev >= index_length(self)) {
@@ -501,7 +533,8 @@
 
 	/* Find the newly added node, offset from the "already on-disk" length
 	 */
-	data = self->added + self->hdrsize * (rev - self->length);
+	data = self->added + self->entry_size * (rev - self->length);
+	putbe64(offset_flags, data);
 	putbe64(sidedata_offset, data + 64);
 	putbe32(sidedata_comp_len, data + 72);
 
@@ -2651,17 +2684,17 @@
 	const char *data = (const char *)self->buf.buf;
 	Py_ssize_t pos = 0;
 	Py_ssize_t end = self->buf.len;
-	long incr = self->hdrsize;
+	long incr = self->entry_size;
 	Py_ssize_t len = 0;
 
-	while (pos + self->hdrsize <= end && pos >= 0) {
+	while (pos + self->entry_size <= end && pos >= 0) {
 		uint32_t comp_len, sidedata_comp_len = 0;
 		/* 3rd element of header is length of compressed inline data */
 		comp_len = getbe32(data + pos + 8);
-		if (self->hdrsize == v2_hdrsize) {
+		if (self->entry_size == v2_entry_size) {
 			sidedata_comp_len = getbe32(data + pos + 72);
 		}
-		incr = self->hdrsize + comp_len + sidedata_comp_len;
+		incr = self->entry_size + comp_len + sidedata_comp_len;
 		if (offsets)
 			offsets[len] = data + pos;
 		len++;
@@ -2714,20 +2747,16 @@
 	}
 
 	if (revlogv2 && PyObject_IsTrue(revlogv2)) {
-		self->hdrsize = v2_hdrsize;
+		self->format_version = format_v2;
+		self->entry_size = v2_entry_size;
 	} else {
-		self->hdrsize = v1_hdrsize;
+		self->format_version = format_v1;
+		self->entry_size = v1_entry_size;
 	}
 
-	if (self->hdrsize == v1_hdrsize) {
-		self->nullentry =
-		    Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
-		                  -1, -1, -1, nullid, self->nodelen);
-	} else {
-		self->nullentry =
-		    Py_BuildValue(PY23("iiiiiiis#ii", "iiiiiiiy#ii"), 0, 0, 0,
-		                  -1, -1, -1, -1, nullid, self->nodelen, 0, 0);
-	}
+	self->nullentry =
+	    Py_BuildValue(PY23("iiiiiiis#ii", "iiiiiiiy#ii"), 0, 0, 0, -1, -1,
+	                  -1, -1, nullid, self->nodelen, 0, 0);
 
 	if (!self->nullentry)
 		return -1;
@@ -2750,11 +2779,11 @@
 			goto bail;
 		self->length = len;
 	} else {
-		if (size % self->hdrsize) {
+		if (size % self->entry_size) {
 			PyErr_SetString(PyExc_ValueError, "corrupt index file");
 			goto bail;
 		}
-		self->length = size / self->hdrsize;
+		self->length = size / self->entry_size;
 	}
 
 	return 0;
@@ -2859,6 +2888,10 @@
     {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
      "find length of shortest hex nodeid of a binary ID"},
     {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
+    {"entry_binary", (PyCFunction)index_entry_binary, METH_O,
+     "return an entry in binary form"},
+    {"pack_header", (PyCFunction)index_pack_header, METH_VARARGS,
+     "pack the revlog header information into binary"},
     {NULL} /* Sentinel */
 };
 
@@ -2868,7 +2901,7 @@
 };
 
 static PyMemberDef index_members[] = {
-    {"entry_size", T_LONG, offsetof(indexObject, hdrsize), 0,
+    {"entry_size", T_LONG, offsetof(indexObject, entry_size), 0,
      "size of an index entry"},
     {NULL} /* Sentinel */
 };
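
Editor's note: the two new index methods let Python re-serialize the index
without knowing the entry layout: `pack_header()` renders the 4-byte version
header and `entry_binary(rev)` returns a revision's raw entry (minus the 4
header bytes for rev 0, which the header overlays on disk). A hedged
reconstruction sketch:

    def index_to_bytes(index, header):
        # header is the integer version word; the 64- or 96-byte entry
        # layout stays private to the C code.
        chunks = [index.pack_header(header)]
        for rev in range(len(index)):
            chunks.append(index.entry_binary(rev))
        return b''.join(chunks)
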
--- a/mercurial/changegroup.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/changegroup.py	Mon May 17 15:05:24 2021 +0200
@@ -7,7 +7,6 @@
 
 from __future__ import absolute_import
 
-import collections
 import os
 import struct
 import weakref
@@ -15,7 +14,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -34,10 +32,13 @@
 
 from .interfaces import repository
 from .revlogutils import sidedata as sidedatamod
+from .revlogutils import constants as revlog_constants
+from .utils import storageutil
 
 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
+_CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b">B20s20s20s20s20sH")
 
 LFS_REQUIREMENT = b'lfs'
 
@@ -194,7 +195,8 @@
         else:
             deltabase = prevnode
         flags = 0
-        return node, p1, p2, deltabase, cs, flags
+        protocol_flags = 0
+        return node, p1, p2, deltabase, cs, flags, protocol_flags
 
     def deltachunk(self, prevnode):
         l = self._chunklength()
@@ -203,10 +205,9 @@
         headerdata = readexactly(self._stream, self.deltaheadersize)
         header = self.deltaheader.unpack(headerdata)
         delta = readexactly(self._stream, l - self.deltaheadersize)
-        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
-        # cg4 forward-compat
-        sidedata = {}
-        return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
+        header = self._deltaheader(header, prevnode)
+        node, p1, p2, deltabase, cs, flags, protocol_flags = header
+        return node, p1, p2, cs, deltabase, delta, flags, protocol_flags
 
     def getchunks(self):
         """returns all the chunks contained in the bundle
@@ -293,8 +294,13 @@
 
         # Only useful if we're adding sidedata categories. If both peers have
         # the same categories, then we simply don't do anything.
-        if self.version == b'04' and srctype == b'pull':
-            sidedata_helpers = get_sidedata_helpers(
+        adding_sidedata = (
+            requirements.REVLOGV2_REQUIREMENT in repo.requirements
+            and self.version == b'04'
+            and srctype == b'pull'
+        )
+        if adding_sidedata:
+            sidedata_helpers = sidedatamod.get_sidedata_helpers(
                 repo,
                 sidedata_categories or set(),
                 pull=True,
@@ -386,15 +392,16 @@
                 _(b'manifests'), unit=_(b'chunks'), total=changesets
             )
             on_manifest_rev = None
-            if sidedata_helpers and b'manifest' in sidedata_helpers[1]:
+            if sidedata_helpers:
+                if revlog_constants.KIND_MANIFESTLOG in sidedata_helpers[1]:
 
-                def on_manifest_rev(manifest, rev):
-                    range = touched_manifests.get(manifest)
-                    if not range:
-                        touched_manifests[manifest] = (rev, rev)
-                    else:
-                        assert rev == range[1] + 1
-                        touched_manifests[manifest] = (range[0], rev)
+                    def on_manifest_rev(manifest, rev):
+                        range = touched_manifests.get(manifest)
+                        if not range:
+                            touched_manifests[manifest] = (rev, rev)
+                        else:
+                            assert rev == range[1] + 1
+                            touched_manifests[manifest] = (range[0], rev)
 
             self._unpackmanifests(
                 repo,
@@ -417,15 +424,16 @@
                         needfiles.setdefault(f, set()).add(n)
 
             on_filelog_rev = None
-            if sidedata_helpers and b'filelog' in sidedata_helpers[1]:
+            if sidedata_helpers:
+                if revlog_constants.KIND_FILELOG in sidedata_helpers[1]:
 
-                def on_filelog_rev(filelog, rev):
-                    range = touched_filelogs.get(filelog)
-                    if not range:
-                        touched_filelogs[filelog] = (rev, rev)
-                    else:
-                        assert rev == range[1] + 1
-                        touched_filelogs[filelog] = (range[0], rev)
+                    def on_filelog_rev(filelog, rev):
+                        range = touched_filelogs.get(filelog)
+                        if not range:
+                            touched_filelogs[filelog] = (rev, rev)
+                        else:
+                            assert rev == range[1] + 1
+                            touched_filelogs[filelog] = (range[0], rev)
 
             # process the files
             repo.ui.status(_(b"adding file changes\n"))
@@ -440,12 +448,14 @@
             )
 
             if sidedata_helpers:
-                if b'changelog' in sidedata_helpers[1]:
-                    cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1)
+                if revlog_constants.KIND_CHANGELOG in sidedata_helpers[1]:
+                    cl.rewrite_sidedata(
+                        trp, sidedata_helpers, clstart, clend - 1
+                    )
                 for mf, (startrev, endrev) in touched_manifests.items():
-                    mf.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+                    mf.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
                 for fl, (startrev, endrev) in touched_filelogs.items():
-                    fl.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+                    fl.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
 
             # making sure the value exists
             tr.changes.setdefault(b'changegroup-count-changesets', 0)
@@ -590,7 +600,8 @@
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, deltabase, cs = headertuple
         flags = 0
-        return node, p1, p2, deltabase, cs, flags
+        protocol_flags = 0
+        return node, p1, p2, deltabase, cs, flags, protocol_flags
 
 
 class cg3unpacker(cg2unpacker):
@@ -608,7 +619,8 @@
 
     def _deltaheader(self, headertuple, prevnode):
         node, p1, p2, deltabase, cs, flags = headertuple
-        return node, p1, p2, deltabase, cs, flags
+        protocol_flags = 0
+        return node, p1, p2, deltabase, cs, flags, protocol_flags
 
     def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
         super(cg3unpacker, self)._unpackmanifests(
@@ -631,18 +643,24 @@
     cg4 streams add support for exchanging sidedata.
     """
 
+    deltaheader = _CHANGEGROUPV4_DELTA_HEADER
+    deltaheadersize = deltaheader.size
     version = b'04'
 
+    def _deltaheader(self, headertuple, prevnode):
+        protocol_flags, node, p1, p2, deltabase, cs, flags = headertuple
+        return node, p1, p2, deltabase, cs, flags, protocol_flags
+
     def deltachunk(self, prevnode):
         res = super(cg4unpacker, self).deltachunk(prevnode)
         if not res:
             return res
 
-        (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res
+        (node, p1, p2, cs, deltabase, delta, flags, protocol_flags) = res
 
-        sidedata_raw = getchunk(self._stream)
         sidedata = {}
-        if len(sidedata_raw) > 0:
+        if protocol_flags & storageutil.CG_FLAG_SIDEDATA:
+            sidedata_raw = getchunk(self._stream)
             sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
 
         return node, p1, p2, cs, deltabase, delta, flags, sidedata
@@ -673,7 +691,7 @@
 
     if delta.delta is not None:
         prefix, data = b'', delta.delta
-    elif delta.basenode == nullid:
+    elif delta.basenode == repo.nullid:
         data = delta.revision
         prefix = mdiff.trivialdiffheader(len(data))
     else:
@@ -688,10 +706,10 @@
         yield prefix
     yield data
 
-    sidedata = delta.sidedata
-    if sidedata is not None:
+    if delta.protocol_flags & storageutil.CG_FLAG_SIDEDATA:
         # Need a separate chunk for sidedata to be able to differentiate
         # "raw delta" length and sidedata length
+        sidedata = delta.sidedata
         yield chunkheader(len(sidedata))
         yield sidedata
 
@@ -787,9 +805,15 @@
                         return i
                 # We failed to resolve a parent for this node, so
                 # we crash the changegroup construction.
+                if util.safehasattr(store, 'target'):
+                    target = store.display_id
+                else:
+                    # some stores are not revlogs themselves but wrap one
+                    target = store._revlog.display_id
+
                 raise error.Abort(
                     b"unable to resolve parent while packing '%s' %r"
-                    b' for changeset %r' % (store.indexfile, rev, clrev)
+                    b' for changeset %r' % (target, rev, clrev)
                 )
 
         return nullrev
@@ -828,7 +852,8 @@
     If topic is not None, progress detail will be generated using this
     topic name (e.g. changesets, manifests, etc).
 
-    See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+    See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+    `sidedata_helpers`.
     """
     if not nodes:
         return
@@ -1056,7 +1081,9 @@
                 # TODO a better approach would be for the strip bundle to
                 # correctly advertise its sidedata categories directly.
                 remote_sidedata = repo._wanted_sidedata
-            sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata)
+            sidedata_helpers = sidedatamod.get_sidedata_helpers(
+                repo, remote_sidedata
+            )
 
         clstate, deltas = self._generatechangelog(
             cl,
@@ -1194,7 +1221,8 @@
         if generate is False, the state will be fully populated and no chunk
         stream will be yielded
 
-        See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+        See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+        `sidedata_helpers`.
         """
         clrevorder = {}
         manifests = {}
@@ -1299,7 +1327,8 @@
         `source` is unused here, but is used by extensions like remotefilelog to
         change what is sent based in pulls vs pushes, etc.
 
-        See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+        See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+        `sidedata_helpers`.
         """
         repo = self._repo
         mfl = repo.manifestlog
@@ -1633,11 +1662,18 @@
     fullnodes=None,
     remote_sidedata=None,
 ):
-    # Same header func as cg3. Sidedata is in a separate chunk from the delta to
-    # differenciate "raw delta" and sidedata.
-    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
-        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
-    )
+    # Sidedata is in a separate chunk from the delta to differentiate
+    # "raw delta" and sidedata.
+    def builddeltaheader(d):
+        return _CHANGEGROUPV4_DELTA_HEADER.pack(
+            d.protocol_flags,
+            d.node,
+            d.p1node,
+            d.p2node,
+            d.basenode,
+            d.linknode,
+            d.flags,
+        )
 
     return cgpacker(
         repo,
@@ -1682,11 +1718,14 @@
         #
         # (or even to push subset of history)
         needv03 = True
-    has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
-    if not has_revlogv2:
-        versions.discard(b'04')
     if not needv03:
         versions.discard(b'03')
+    want_v4 = (
+        repo.ui.configbool(b'experimental', b'changegroup4')
+        or requirements.REVLOGV2_REQUIREMENT in repo.requirements
+    )
+    if not want_v4:
+        versions.discard(b'04')
     return versions
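
Together with the ``experimental.changegroup4`` config item added later in this
patch, this means cg4 can be advertised even without revlogv2. A repository
could opt in via its hgrc (illustrative)::

    [experimental]
    changegroup4 = yes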
 
 
@@ -1913,25 +1952,3 @@
                 )
 
     return revisions, files
-
-
-def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
-    # Computers for computing sidedata on-the-fly
-    sd_computers = collections.defaultdict(list)
-    # Computers for categories to remove from sidedata
-    sd_removers = collections.defaultdict(list)
-
-    to_generate = remote_sd_categories - repo._wanted_sidedata
-    to_remove = repo._wanted_sidedata - remote_sd_categories
-    if pull:
-        to_generate, to_remove = to_remove, to_generate
-
-    for revlog_kind, computers in repo._sidedata_computers.items():
-        for category, computer in computers.items():
-            if category in to_generate:
-                sd_computers[revlog_kind].append(computer)
-            if category in to_remove:
-                sd_removers[revlog_kind].append(computer)
-
-    sidedata_helpers = (repo, sd_computers, sd_removers)
-    return sidedata_helpers
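
The helper removed here moves to the ``revlogutils.sidedata`` module (imported
as ``sidedatamod`` above); its core is plain set arithmetic over sidedata
categories. A condensed sketch of that logic, with the repo argument dropped
for clarity::

    import collections

    def pick_sidedata_work(local_wanted, remote_wanted, computers, pull=False):
        # categories the other side wants but we do not store: generate
        to_generate = remote_wanted - local_wanted
        # categories we store but the other side does not want: strip
        to_remove = local_wanted - remote_wanted
        if pull:
            # on pull the direction of travel is reversed
            to_generate, to_remove = to_remove, to_generate

        sd_computers = collections.defaultdict(list)
        sd_removers = collections.defaultdict(list)
        for revlog_kind, by_category in computers.items():
            for category, computer in by_category.items():
                if category in to_generate:
                    sd_computers[revlog_kind].append(computer)
                if category in to_remove:
                    sd_removers[revlog_kind].append(computer)
        return sd_computers, sd_removers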
--- a/mercurial/changelog.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/changelog.py	Mon May 17 15:05:24 2021 +0200
@@ -11,7 +11,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
 )
 from .thirdparty import attr
 
@@ -26,7 +25,10 @@
     dateutil,
     stringutil,
 )
-from .revlogutils import flagutil
+from .revlogutils import (
+    constants as revlog_constants,
+    flagutil,
+)
 
 _defaultextra = {b'branch': b'default'}
 
@@ -221,7 +223,7 @@
 
     def __new__(cls, cl, text, sidedata, cpsd):
         if not text:
-            return _changelogrevision(extra=_defaultextra, manifest=nullid)
+            return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
 
         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
@@ -393,27 +395,28 @@
         ``concurrencychecker`` will be passed to the revlog init function, see
         the documentation there.
         """
+
         if trypending and opener.exists(b'00changelog.i.a'):
-            indexfile = b'00changelog.i.a'
+            postfix = b'a'
         else:
-            indexfile = b'00changelog.i'
+            postfix = None
 
-        datafile = b'00changelog.d'
         revlog.revlog.__init__(
             self,
             opener,
-            indexfile,
-            datafile=datafile,
+            target=(revlog_constants.KIND_CHANGELOG, None),
+            radix=b'00changelog',
+            postfix=postfix,
             checkambig=True,
             mmaplargeindex=True,
             persistentnodemap=opener.options.get(b'persistent-nodemap', False),
             concurrencychecker=concurrencychecker,
         )
 
-        if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
+        if self._initempty and (self._format_version == revlog.REVLOGV1):
             # changelogs don't benefit from generaldelta.
 
-            self.version &= ~revlog.FLAG_GENERALDELTA
+            self._format_flags &= ~revlog.FLAG_GENERALDELTA
             self._generaldelta = False
 
         # Delta chains for changelogs tend to be very small because entries
@@ -428,7 +431,6 @@
         self._filteredrevs = frozenset()
         self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')
-        self.revlog_kind = b'changelog'
 
     @property
     def filteredrevs(self):
@@ -447,13 +449,13 @@
         if not self._delayed:
             if len(self) == 0:
                 self._divert = True
-                if self._realopener.exists(self.indexfile + b'.a'):
-                    self._realopener.unlink(self.indexfile + b'.a')
-                self.opener = _divertopener(self._realopener, self.indexfile)
+                if self._realopener.exists(self._indexfile + b'.a'):
+                    self._realopener.unlink(self._indexfile + b'.a')
+                self.opener = _divertopener(self._realopener, self._indexfile)
             else:
                 self._delaybuf = []
                 self.opener = _delayopener(
-                    self._realopener, self.indexfile, self._delaybuf
+                    self._realopener, self._indexfile, self._delaybuf
                 )
         self._delayed = True
         tr.addpending(b'cl-%i' % id(self), self._writepending)
@@ -466,12 +468,12 @@
         # move redirected index data back into place
         if self._divert:
             assert not self._delaybuf
-            tmpname = self.indexfile + b".a"
+            tmpname = self._indexfile + b".a"
             nfile = self.opener.open(tmpname)
             nfile.close()
-            self.opener.rename(tmpname, self.indexfile, checkambig=True)
+            self.opener.rename(tmpname, self._indexfile, checkambig=True)
         elif self._delaybuf:
-            fp = self.opener(self.indexfile, b'a', checkambig=True)
+            fp = self.opener(self._indexfile, b'a', checkambig=True)
             fp.write(b"".join(self._delaybuf))
             fp.close()
             self._delaybuf = None
@@ -484,8 +486,8 @@
         pretxnchangegroup"""
         if self._delaybuf:
             # make a temporary copy of the index
-            fp1 = self._realopener(self.indexfile)
-            pendingfilename = self.indexfile + b".a"
+            fp1 = self._realopener(self._indexfile)
+            pendingfilename = self._indexfile + b".a"
             # register as a temp file to ensure cleanup on failure
             tr.registertmp(pendingfilename)
             # write existing data
@@ -497,16 +499,16 @@
             # switch modes so finalize can simply rename
             self._delaybuf = None
             self._divert = True
-            self.opener = _divertopener(self._realopener, self.indexfile)
+            self.opener = _divertopener(self._realopener, self._indexfile)
 
         if self._divert:
             return True
 
         return False
 
-    def _enforceinlinesize(self, tr, fp=None):
+    def _enforceinlinesize(self, tr):
         if not self._delayed:
-            revlog.revlog._enforceinlinesize(self, tr, fp)
+            revlog.revlog._enforceinlinesize(self, tr)
 
     def read(self, nodeorrev):
         """Obtain data from a parsed changelog revision.
--- a/mercurial/cmdutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/cmdutil.py	Mon May 17 15:05:24 2021 +0200
@@ -15,7 +15,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -62,6 +61,10 @@
     stringutil,
 )
 
+from .revlogutils import (
+    constants as revlog_constants,
+)
+
 if pycompat.TYPE_CHECKING:
     from typing import (
         Any,
@@ -998,11 +1001,6 @@
                 _(b"a branch of the same name already exists")
             )
 
-        if repo.revs(b'obsolete() and %ld', revs):
-            raise error.InputError(
-                _(b"cannot change branch of a obsolete changeset")
-            )
-
         # make sure only topological heads
         if repo.revs(b'heads(%ld) - head()', revs):
             raise error.InputError(
@@ -1097,7 +1095,7 @@
     'hint' is the usual hint given to Abort exception.
     """
 
-    if merge and repo.dirstate.p2() != nullid:
+    if merge and repo.dirstate.p2() != repo.nullid:
         raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)
     st = repo.status()
     if st.modified or st.added or st.removed or st.deleted:
@@ -1434,8 +1432,12 @@
             raise error.CommandError(cmd, _(b'invalid arguments'))
         if not os.path.isfile(file_):
             raise error.InputError(_(b"revlog '%s' not found") % file_)
+
+        target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
         r = revlog.revlog(
-            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
+            vfsmod.vfs(encoding.getcwd(), audit=False),
+            target=target,
+            radix=file_[:-2],
         )
     return r
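
With ``target``/``radix`` the revlog now derives its own file names instead of
taking an explicit index file, which is why the old ``file_[:-2] + b".i"``
computation disappears. A sketch of the naming scheme implied by this hunk and
the changelog hunk above (the real derivation lives inside ``revlog.__init__``)::

    def revlog_file_names(radix, postfix=None):
        # index is "<radix>.i", or "<radix>.i.<postfix>" for pending data
        # (e.g. the changelog's "00changelog.i.a"); data is "<radix>.d"
        if postfix is None:
            index = b'%s.i' % radix
        else:
            index = b'%s.i.%s' % (radix, postfix)
        return index, b'%s.d' % radix

    assert revlog_file_names(b'00changelog') == (b'00changelog.i', b'00changelog.d')
    assert revlog_file_names(b'00changelog', b'a')[0] == b'00changelog.i.a'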
 
@@ -1849,7 +1851,10 @@
             continue
         copylist.append((tfn(pat, dest, srcs), srcs))
     if not copylist:
-        raise error.InputError(_(b'no files to copy'))
+        hint = None
+        if rename:
+            hint = _(b'maybe you meant to use --after --at-rev=.')
+        raise error.InputError(_(b'no files to copy'), hint=hint)
 
     errors = 0
     for targetpath, srcs in copylist:
@@ -2104,7 +2109,7 @@
     if parents:
         prev = parents[0]
     else:
-        prev = nullid
+        prev = repo.nullid
 
     fm.context(ctx=ctx)
     fm.plain(b'# HG changeset patch\n')
@@ -2967,7 +2972,7 @@
         ms.reset()
 
         # Reroute the working copy parent to the new changeset
-        repo.setparents(newid, nullid)
+        repo.setparents(newid, repo.nullid)
 
         # Fixing the dirstate because localrepo.commitctx does not update
         # it. This is rather convenient because we did not need to update
@@ -3322,7 +3327,7 @@
 
         # in case of merge, files that are actually added can be reported as
         # modified, we need to post process the result
-        if p2 != nullid:
+        if p2 != repo.nullid:
             mergeadd = set(dsmodified)
             for path in dsmodified:
                 if path in mf:
@@ -3593,7 +3598,7 @@
         # We're reverting to our parent. If possible, we'd like status
         # to report the file as clean. We have to use normallookup for
         # merges to avoid losing information about merged/dirty files.
-        if p2 != nullid:
+        if p2 != repo.nullid:
             normal = repo.dirstate.normallookup
         else:
             normal = repo.dirstate.normal
@@ -3690,7 +3695,7 @@
             repo.dirstate.add(f)
 
     normal = repo.dirstate.normallookup
-    if node == parent and p2 == nullid:
+    if node == parent and p2 == repo.nullid:
         normal = repo.dirstate.normal
     for f in actions[b'undelete'][0]:
         if interactive:
--- a/mercurial/commands.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/commands.py	Mon May 17 15:05:24 2021 +0200
@@ -15,10 +15,8 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
     short,
-    wdirhex,
     wdirrev,
 )
 from .pycompat import open
@@ -486,7 +484,7 @@
                     return b'%d ' % rev
 
         def formathex(h):
-            if h == wdirhex:
+            if h == repo.nodeconstants.wdirhex:
                 return b'%s+' % shorthex(hex(ctx.p1().node()))
             else:
                 return b'%s ' % shorthex(h)
@@ -809,9 +807,9 @@
         )
 
     p1, p2 = repo.changelog.parents(node)
-    if p1 == nullid:
+    if p1 == repo.nullid:
         raise error.InputError(_(b'cannot backout a change with no parents'))
-    if p2 != nullid:
+    if p2 != repo.nullid:
         if not opts.get(b'parent'):
             raise error.InputError(_(b'cannot backout a merge changeset'))
         p = repo.lookup(opts[b'parent'])
@@ -1085,7 +1083,7 @@
                 )
         else:
             node, p2 = repo.dirstate.parents()
-            if p2 != nullid:
+            if p2 != repo.nullid:
                 raise error.StateError(_(b'current bisect revision is a merge'))
         if rev:
             if not nodes:
@@ -2204,6 +2202,7 @@
         (b'u', b'untrusted', None, _(b'show untrusted configuration options')),
         (b'e', b'edit', None, _(b'edit user config')),
         (b'l', b'local', None, _(b'edit repository config')),
+        (b'', b'source', None, _(b'show source of configuration value')),
         (
             b'',
             b'shared',
@@ -2234,7 +2233,7 @@
     --global, edit the system-wide config file. With --local, edit the
     repository-level config file.
 
-    With --debug, the source (filename and line number) is printed
+    With --source, the source (filename and line number) is printed
     for each config item.
 
     See :hg:`help config` for more information about config files.
@@ -2337,6 +2336,7 @@
     selentries = set(selentries)
 
     matched = False
+    show_source = ui.debugflag or opts.get(b'source')
     for section, name, value in ui.walkconfig(untrusted=untrusted):
         source = ui.configsource(section, name, untrusted)
         value = pycompat.bytestr(value)
@@ -2348,7 +2348,7 @@
         if values and not (section in selsections or entryname in selentries):
             continue
         fm.startitem()
-        fm.condwrite(ui.debugflag, b'source', b'%s: ', source)
+        fm.condwrite(show_source, b'source', b'%s: ', source)
         if uniquesel:
             fm.data(name=entryname)
             fm.write(b'value', b'%s\n', value)
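
In plain mode the source is emitted as a ``file:line:`` prefix in front of each
value, so ``--source`` now gives the same provenance information that
previously required ``--debug``. Hypothetical output (path, line number and
value invented for illustration)::

    $ hg config --source ui.username
    /home/alice/.hgrc:2: Alice <alice@example.com>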
@@ -4847,7 +4847,7 @@
 
     opts = pycompat.byteskwargs(opts)
     abort = opts.get(b'abort')
-    if abort and repo.dirstate.p2() == nullid:
+    if abort and repo.dirstate.p2() == repo.nullid:
         cmdutil.wrongtooltocontinue(repo, _(b'merge'))
     cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
     if abort:
@@ -5072,7 +5072,7 @@
 
     displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
     for n in p:
-        if n != nullid:
+        if n != repo.nullid:
             displayer.show(repo[n])
     displayer.close()
 
@@ -5128,15 +5128,9 @@
     """
 
     opts = pycompat.byteskwargs(opts)
+
+    pathitems = urlutil.list_paths(ui, search)
     ui.pager(b'paths')
-    if search:
-        pathitems = [
-            (name, path)
-            for name, path in pycompat.iteritems(ui.paths)
-            if name == search
-        ]
-    else:
-        pathitems = sorted(pycompat.iteritems(ui.paths))
 
     fm = ui.formatter(b'paths', opts)
     if fm.isplain():
@@ -5157,6 +5151,11 @@
             assert subopt not in (b'name', b'url')
             if showsubopts:
                 fm.plain(b'%s:%s = ' % (name, subopt))
+            if isinstance(value, bool):
+                if value:
+                    value = b'yes'
+                else:
+                    value = b'no'
             fm.condwrite(showsubopts, subopt, b'%s\n', value)
 
     fm.end()
@@ -6105,7 +6104,7 @@
     with repo.wlock():
         ms = mergestatemod.mergestate.read(repo)
 
-        if not (ms.active() or repo.dirstate.p2() != nullid):
+        if not (ms.active() or repo.dirstate.p2() != repo.nullid):
             raise error.StateError(
                 _(b'resolve command not applicable when not merging')
             )
@@ -6223,7 +6222,7 @@
                     raise
 
         ms.commit()
-        branchmerge = repo.dirstate.p2() != nullid
+        branchmerge = repo.dirstate.p2() != repo.nullid
         mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
 
         if not didwork and pats:
@@ -6315,7 +6314,7 @@
         opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
 
     parent, p2 = repo.dirstate.parents()
-    if not opts.get(b'rev') and p2 != nullid:
+    if not opts.get(b'rev') and p2 != repo.nullid:
         # revert after merge is a trap for new users (issue2915)
         raise error.InputError(
             _(b'uncommitted merge with no revision specified'),
@@ -6335,7 +6334,7 @@
         or opts.get(b'interactive')
     ):
         msg = _(b"no files or directories specified")
-        if p2 != nullid:
+        if p2 != repo.nullid:
             hint = _(
                 b"uncommitted merge, use --all to discard all changes,"
                 b" or 'hg update -C .' to abort the merge"
@@ -7396,7 +7395,7 @@
             for n in names:
                 if repo.tagtype(n) == b'global':
                     alltags = tagsmod.findglobaltags(ui, repo)
-                    if alltags[n][0] == nullid:
+                    if alltags[n][0] == repo.nullid:
                         raise error.InputError(
                             _(b"tag '%s' is already removed") % n
                         )
@@ -7423,7 +7422,7 @@
                     )
         if not opts.get(b'local'):
             p1, p2 = repo.dirstate.parents()
-            if p2 != nullid:
+            if p2 != repo.nullid:
                 raise error.StateError(_(b'uncommitted merge'))
             bheads = repo.branchheads()
             if not opts.get(b'force') and bheads and p1 not in bheads:
--- a/mercurial/commit.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/commit.py	Mon May 17 15:05:24 2021 +0200
@@ -10,7 +10,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
 )
 
@@ -277,10 +276,10 @@
     """
 
     fname = fctx.path()
-    fparent1 = manifest1.get(fname, nullid)
-    fparent2 = manifest2.get(fname, nullid)
+    fparent1 = manifest1.get(fname, repo.nullid)
+    fparent2 = manifest2.get(fname, repo.nullid)
     touched = None
-    if fparent1 == fparent2 == nullid:
+    if fparent1 == fparent2 == repo.nullid:
         touched = 'added'
 
     if isinstance(fctx, context.filectx):
@@ -291,9 +290,11 @@
         if node in [fparent1, fparent2]:
             repo.ui.debug(b'reusing %s filelog entry\n' % fname)
             if (
-                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+                fparent1 != repo.nullid
+                and manifest1.flags(fname) != fctx.flags()
             ) or (
-                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+                fparent2 != repo.nullid
+                and manifest2.flags(fname) != fctx.flags()
             ):
                 touched = 'modified'
             return node, touched
@@ -327,7 +328,9 @@
         newfparent = fparent2
 
         if manifest2:  # branch merge
-            if fparent2 == nullid or cnode is None:  # copied on remote side
+            if (
+                fparent2 == repo.nullid or cnode is None
+            ):  # copied on remote side
                 if cfname in manifest2:
                     cnode = manifest2[cfname]
                     newfparent = fparent1
@@ -346,7 +349,7 @@
             if includecopymeta:
                 meta[b"copy"] = cfname
                 meta[b"copyrev"] = hex(cnode)
-            fparent1, fparent2 = nullid, newfparent
+            fparent1, fparent2 = repo.nullid, newfparent
         else:
             repo.ui.warn(
                 _(
@@ -356,20 +359,20 @@
                 % (fname, cfname)
             )
 
-    elif fparent1 == nullid:
-        fparent1, fparent2 = fparent2, nullid
-    elif fparent2 != nullid:
+    elif fparent1 == repo.nullid:
+        fparent1, fparent2 = fparent2, repo.nullid
+    elif fparent2 != repo.nullid:
         if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other':
-            fparent1, fparent2 = fparent2, nullid
+            fparent1, fparent2 = fparent2, repo.nullid
         elif ms.active() and ms.extras(fname).get(b'merged') != b'yes':
-            fparent1, fparent2 = fparent1, nullid
+            fparent1, fparent2 = fparent1, repo.nullid
         # is one parent an ancestor of the other?
         else:
             fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
             if fparent1 in fparentancestors:
-                fparent1, fparent2 = fparent2, nullid
+                fparent1, fparent2 = fparent2, repo.nullid
             elif fparent2 in fparentancestors:
-                fparent2 = nullid
+                fparent2 = repo.nullid
 
     force_new_node = False
     # The file might have been deleted by merge code and user explicitly choose
@@ -384,9 +387,14 @@
         force_new_node = True
     # is the file changed?
     text = fctx.data()
-    if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
+    if (
+        fparent2 != repo.nullid
+        or meta
+        or flog.cmp(fparent1, text)
+        or force_new_node
+    ):
         if touched is None:  # do not overwrite added
-            if fparent2 == nullid:
+            if fparent2 == repo.nullid:
                 touched = 'modified'
             else:
                 touched = 'merged'
--- a/mercurial/config.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/config.py	Mon May 17 15:05:24 2021 +0200
@@ -258,93 +258,3 @@
         self.parse(
             path, fp.read(), sections=sections, remap=remap, include=include
         )
-
-
-def parselist(value):
-    """parse a configuration value as a list of comma/space separated strings
-
-    >>> parselist(b'this,is "a small" ,test')
-    ['this', 'is', 'a small', 'test']
-    """
-
-    def _parse_plain(parts, s, offset):
-        whitespace = False
-        while offset < len(s) and (
-            s[offset : offset + 1].isspace() or s[offset : offset + 1] == b','
-        ):
-            whitespace = True
-            offset += 1
-        if offset >= len(s):
-            return None, parts, offset
-        if whitespace:
-            parts.append(b'')
-        if s[offset : offset + 1] == b'"' and not parts[-1]:
-            return _parse_quote, parts, offset + 1
-        elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\':
-            parts[-1] = parts[-1][:-1] + s[offset : offset + 1]
-            return _parse_plain, parts, offset + 1
-        parts[-1] += s[offset : offset + 1]
-        return _parse_plain, parts, offset + 1
-
-    def _parse_quote(parts, s, offset):
-        if offset < len(s) and s[offset : offset + 1] == b'"':  # ""
-            parts.append(b'')
-            offset += 1
-            while offset < len(s) and (
-                s[offset : offset + 1].isspace()
-                or s[offset : offset + 1] == b','
-            ):
-                offset += 1
-            return _parse_plain, parts, offset
-
-        while offset < len(s) and s[offset : offset + 1] != b'"':
-            if (
-                s[offset : offset + 1] == b'\\'
-                and offset + 1 < len(s)
-                and s[offset + 1 : offset + 2] == b'"'
-            ):
-                offset += 1
-                parts[-1] += b'"'
-            else:
-                parts[-1] += s[offset : offset + 1]
-            offset += 1
-
-        if offset >= len(s):
-            real_parts = _configlist(parts[-1])
-            if not real_parts:
-                parts[-1] = b'"'
-            else:
-                real_parts[0] = b'"' + real_parts[0]
-                parts = parts[:-1]
-                parts.extend(real_parts)
-            return None, parts, offset
-
-        offset += 1
-        while offset < len(s) and s[offset : offset + 1] in [b' ', b',']:
-            offset += 1
-
-        if offset < len(s):
-            if offset + 1 == len(s) and s[offset : offset + 1] == b'"':
-                parts[-1] += b'"'
-                offset += 1
-            else:
-                parts.append(b'')
-        else:
-            return None, parts, offset
-
-        return _parse_plain, parts, offset
-
-    def _configlist(s):
-        s = s.rstrip(b' ,')
-        if not s:
-            return []
-        parser, parts, offset = _parse_plain, [b''], 0
-        while parser:
-            parser, parts, offset = parser(parts, s, offset)
-        return parts
-
-    if value is not None and isinstance(value, bytes):
-        result = _configlist(value.lstrip(b' ,\n'))
-    else:
-        result = value
-    return result or []
--- a/mercurial/configitems.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/configitems.py	Mon May 17 15:05:24 2021 +0200
@@ -904,6 +904,11 @@
 )
 coreconfigitem(
     b'experimental',
+    b'changegroup4',
+    default=False,
+)
+coreconfigitem(
+    b'experimental',
     b'cleanup-as-archived',
     default=False,
 )
@@ -954,6 +959,11 @@
 )
 coreconfigitem(
     b'experimental',
+    b'dirstate-tree.in-memory',
+    default=False,
+)
+coreconfigitem(
+    b'experimental',
     b'editortmpinhg',
     default=False,
 )
@@ -1138,6 +1148,16 @@
     b'revisions.prefixhexnode',
     default=False,
 )
+# "out of experimental" todo list.
+#
+# * grow a docket file to at least store the last offset of the data
+#   file when rewriting sidedata.
+# * find a way of dealing with garbage data if we allow rewriting
+#   *existing* sidedata.
+# * Exchange-wise, we will also need to do something more efficient than
+#   keeping references to the affected revlogs, especially memory-wise when
+#   rewriting sidedata.
+# * Also... compress the sidedata? (this should be coming very soon)
 coreconfigitem(
     b'experimental',
     b'revlogv2',
@@ -1342,20 +1362,6 @@
     b'use-persistent-nodemap',
     default=_persistent_nodemap_default,
 )
-# TODO needs to grow a docket file to at least store the last offset of the data
-# file when rewriting sidedata.
-# Will also need a way of dealing with garbage data if we allow rewriting
-# *existing* sidedata.
-# Exchange-wise, we will also need to do something more efficient than keeping
-# references to the affected revlogs, especially memory-wise when rewriting
-# sidedata.
-# Also... compress the sidedata? (this should be coming very soon)
-coreconfigitem(
-    b'format',
-    b'exp-revlogv2.2',
-    default=False,
-    experimental=True,
-)
 coreconfigitem(
     b'format',
     b'exp-use-copies-side-data-changeset',
@@ -1364,12 +1370,6 @@
 )
 coreconfigitem(
     b'format',
-    b'exp-use-side-data',
-    default=False,
-    experimental=True,
-)
-coreconfigitem(
-    b'format',
     b'use-share-safe',
     default=False,
 )
--- a/mercurial/context.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/context.py	Mon May 17 15:05:24 2021 +0200
@@ -14,14 +14,9 @@
 
 from .i18n import _
 from .node import (
-    addednodeid,
     hex,
-    modifiednodeid,
-    nullid,
     nullrev,
     short,
-    wdirfilenodeids,
-    wdirhex,
 )
 from .pycompat import (
     getattr,
@@ -140,7 +135,7 @@
                 removed.append(fn)
             elif flag1 != flag2:
                 modified.append(fn)
-            elif node2 not in wdirfilenodeids:
+            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                 # When comparing files between two commits, we save time by
                 # not comparing the file contents when the nodeids differ.
                 # Note that this means we incorrectly report a reverted change
@@ -737,7 +732,7 @@
             n2 = c2._parents[0]._node
         cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
         if not cahs:
-            anc = nullid
+            anc = self._repo.nodeconstants.nullid
         elif len(cahs) == 1:
             anc = cahs[0]
         else:
@@ -1132,7 +1127,11 @@
         _path = self._path
         fl = self._filelog
         parents = self._filelog.parents(self._filenode)
-        pl = [(_path, node, fl) for node in parents if node != nullid]
+        pl = [
+            (_path, node, fl)
+            for node in parents
+            if node != self._repo.nodeconstants.nullid
+        ]
 
         r = fl.renamed(self._filenode)
         if r:
@@ -1393,6 +1392,9 @@
     def __bytes__(self):
         return bytes(self._parents[0]) + b"+"
 
+    def hex(self):
+        return self._repo.nodeconstants.wdirhex
+
     __str__ = encoding.strmethod(__bytes__)
 
     def __nonzero__(self):
@@ -1556,12 +1558,12 @@
         return self._repo.dirstate[key] not in b"?r"
 
     def hex(self):
-        return wdirhex
+        return self._repo.nodeconstants.wdirhex
 
     @propertycache
     def _parents(self):
         p = self._repo.dirstate.parents()
-        if p[1] == nullid:
+        if p[1] == self._repo.nodeconstants.nullid:
             p = p[:-1]
         # use unfiltered repo to delay/avoid loading obsmarkers
         unfi = self._repo.unfiltered()
@@ -1572,7 +1574,9 @@
             for n in p
         ]
 
-    def setparents(self, p1node, p2node=nullid):
+    def setparents(self, p1node, p2node=None):
+        if p2node is None:
+            p2node = self._repo.nodeconstants.nullid
         dirstate = self._repo.dirstate
         with dirstate.parentchange():
             copies = dirstate.setparents(p1node, p2node)
@@ -1584,7 +1588,7 @@
                 for f in copies:
                     if f not in pctx and copies[f] in pctx:
                         dirstate.copy(copies[f], f)
-            if p2node == nullid:
+            if p2node == self._repo.nodeconstants.nullid:
                 for f, s in sorted(dirstate.copies().items()):
                     if f not in pctx and s not in pctx:
                         dirstate.copy(None, f)
@@ -1944,8 +1948,8 @@
 
         ff = self._flagfunc
         for i, l in (
-            (addednodeid, status.added),
-            (modifiednodeid, status.modified),
+            (self._repo.nodeconstants.addednodeid, status.added),
+            (self._repo.nodeconstants.modifiednodeid, status.modified),
         ):
             for f in l:
                 man[f] = i
@@ -2070,13 +2074,18 @@
         path = self.copysource()
         if not path:
             return None
-        return path, self._changectx._parents[0]._manifest.get(path, nullid)
+        return (
+            path,
+            self._changectx._parents[0]._manifest.get(
+                path, self._repo.nodeconstants.nullid
+            ),
+        )
 
     def parents(self):
         '''return parent filectxs, following copies if necessary'''
 
         def filenode(ctx, path):
-            return ctx._manifest.get(path, nullid)
+            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
 
         path = self._path
         fl = self._filelog
@@ -2094,7 +2103,7 @@
         return [
             self._parentfilectx(p, fileid=n, filelog=l)
             for p, n, l in pl
-            if n != nullid
+            if n != self._repo.nodeconstants.nullid
         ]
 
     def children(self):
@@ -2222,7 +2231,9 @@
         # ``overlayworkingctx`` (e.g. with --collapse).
         util.clearcachedproperty(self, b'_manifest')
 
-    def setparents(self, p1node, p2node=nullid):
+    def setparents(self, p1node, p2node=None):
+        if p2node is None:
+            p2node = self._repo.nodeconstants.nullid
         assert p1node == self._wrappedctx.node()
         self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
 
@@ -2248,10 +2259,10 @@
 
         flag = self._flagfunc
         for path in self.added():
-            man[path] = addednodeid
+            man[path] = self._repo.nodeconstants.addednodeid
             man.setflag(path, flag(path))
         for path in self.modified():
-            man[path] = modifiednodeid
+            man[path] = self._repo.nodeconstants.modifiednodeid
             man.setflag(path, flag(path))
         for path in self.removed():
             del man[path]
@@ -2827,7 +2838,7 @@
         )
         self._rev = None
         self._node = None
-        parents = [(p or nullid) for p in parents]
+        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
         p1, p2 = parents
         self._parents = [self._repo[p] for p in (p1, p2)]
         files = sorted(set(files))
@@ -2866,10 +2877,10 @@
         man = pctx.manifest().copy()
 
         for f in self._status.modified:
-            man[f] = modifiednodeid
+            man[f] = self._repo.nodeconstants.modifiednodeid
 
         for f in self._status.added:
-            man[f] = addednodeid
+            man[f] = self._repo.nodeconstants.addednodeid
 
         for f in self._status.removed:
             if f in man:
@@ -3006,12 +3017,12 @@
         # sanity check to ensure that the reused manifest parents are
         # manifests of our commit parents
         mp1, mp2 = self.manifestctx().parents
-        if p1 != nullid and p1.manifestnode() != mp1:
+        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
             raise RuntimeError(
                 r"can't reuse the manifest: its p1 "
                 r"doesn't match the new ctx p1"
             )
-        if p2 != nullid and p2.manifestnode() != mp2:
+        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
             raise RuntimeError(
                 r"can't reuse the manifest: "
                 r"its p2 doesn't match the new ctx p2"
--- a/mercurial/copies.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/copies.py	Mon May 17 15:05:24 2021 +0200
@@ -12,10 +12,7 @@
 import os
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 
 from . import (
     match as matchmod,
@@ -579,7 +576,7 @@
             parents = fctx._filelog.parents(fctx._filenode)
             nb_parents = 0
             for n in parents:
-                if n != nullid:
+                if n != repo.nullid:
                     nb_parents += 1
             return nb_parents >= 2
 
--- a/mercurial/debugcommands.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/debugcommands.py	Mon May 17 15:05:24 2021 +0200
@@ -30,7 +30,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -794,7 +793,7 @@
     index = r.index
     start = r.start
     length = r.length
-    generaldelta = r.version & revlog.FLAG_GENERALDELTA
+    generaldelta = r._generaldelta
     withsparseread = getattr(r, '_withsparseread', False)
 
     def revinfo(rev):
@@ -1667,7 +1666,7 @@
         node = r.node(i)
         pp = r.parents(node)
         ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
-        if pp[1] != nullid:
+        if pp[1] != repo.nullid:
             ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write(b"}\n")
 
@@ -1675,7 +1674,7 @@
 @command(b'debugindexstats', [])
 def debugindexstats(ui, repo):
     """show stats related to the changelog index"""
-    repo.changelog.shortest(nullid, 1)
+    repo.changelog.shortest(repo.nullid, 1)
     index = repo.changelog.index
     if not util.safehasattr(index, b'stats'):
         raise error.Abort(_(b'debugindexstats only works with native code'))
@@ -2425,7 +2424,7 @@
             # arbitrary node identifiers, possibly not present in the
             # local repository.
             n = bin(s)
-            if len(n) != len(nullid):
+            if len(n) != repo.nodeconstants.nodelen:
                 raise TypeError()
             return n
         except TypeError:
@@ -2973,8 +2972,8 @@
             )
         return 0
 
-    v = r.version
-    format = v & 0xFFFF
+    format = r._format_version
+    v = r._format_flags
     flags = []
     gdelta = False
     if v & revlog.FLAG_INLINE_DATA:
@@ -3328,7 +3327,7 @@
             try:
                 pp = r.parents(node)
             except Exception:
-                pp = [nullid, nullid]
+                pp = [repo.nullid, repo.nullid]
             if ui.verbose:
                 ui.write(
                     b"% 6d % 9d % 7d % 7d %s %s %s\n"
@@ -3742,7 +3741,9 @@
         for n in chlist:
             if limit is not None and count >= limit:
                 break
-            parents = [True for p in other.changelog.parents(n) if p != nullid]
+            parents = [
+                True for p in other.changelog.parents(n) if p != repo.nullid
+            ]
             if opts.get(b"no_merges") and len(parents) == 2:
                 continue
             count += 1
--- a/mercurial/dirstate.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/dirstate.py	Mon May 17 15:05:24 2021 +0200
@@ -14,7 +14,6 @@
 import stat
 
 from .i18n import _
-from .node import nullid
 from .pycompat import delattr
 
 from hgdemandimport import tracing
@@ -314,7 +313,7 @@
     def branch(self):
         return encoding.tolocal(self._branch)
 
-    def setparents(self, p1, p2=nullid):
+    def setparents(self, p1, p2=None):
         """Set dirstate parents to p1 and p2.
 
         When moving from two parents to one, 'm' merged entries a
@@ -323,6 +322,8 @@
 
         See localrepo.setparents()
         """
+        if p2 is None:
+            p2 = self._nodeconstants.nullid
         if self._parentwriters == 0:
             raise ValueError(
                 b"cannot set dirstate parent outside of "
@@ -335,10 +336,12 @@
             self._origpl = self._pl
         self._map.setparents(p1, p2)
         copies = {}
-        if oldp2 != nullid and p2 == nullid:
-            candidatefiles = self._map.nonnormalset.union(
-                self._map.otherparentset
-            )
+        if (
+            oldp2 != self._nodeconstants.nullid
+            and p2 == self._nodeconstants.nullid
+        ):
+            candidatefiles = self._map.non_normal_or_other_parent_paths()
+
             for f in candidatefiles:
                 s = self._map.get(f)
                 if s is None:
@@ -459,7 +462,7 @@
 
     def normallookup(self, f):
         '''Mark a file normal, but possibly dirty.'''
-        if self._pl[1] != nullid:
+        if self._pl[1] != self._nodeconstants.nullid:
             # if there is a merge going on and the file was either
             # in state 'm' (-1) or coming from other parent (-2) before
             # being removed, restore that state.
@@ -481,7 +484,7 @@
 
     def otherparent(self, f):
         '''Mark as coming from the other parent, always dirty.'''
-        if self._pl[1] == nullid:
+        if self._pl[1] == self._nodeconstants.nullid:
             raise error.Abort(
                 _(b"setting %r to other parent only allowed in merges") % f
             )
@@ -503,7 +506,7 @@
         self._dirty = True
         oldstate = self[f]
         size = 0
-        if self._pl[1] != nullid:
+        if self._pl[1] != self._nodeconstants.nullid:
             entry = self._map.get(f)
             if entry is not None:
                 # backup the previous state
@@ -519,7 +522,7 @@
 
     def merge(self, f):
         '''Mark a file merged.'''
-        if self._pl[1] == nullid:
+        if self._pl[1] == self._nodeconstants.nullid:
             return self.normallookup(f)
         return self.otherparent(f)
 
@@ -638,7 +641,7 @@
 
         if self._origpl is None:
             self._origpl = self._pl
-        self._map.setparents(parent, nullid)
+        self._map.setparents(parent, self._nodeconstants.nullid)
 
         for f in to_lookup:
             self.normallookup(f)
@@ -1459,7 +1462,7 @@
     def clear(self):
         self._map.clear()
         self.copymap.clear()
-        self.setparents(nullid, nullid)
+        self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
         util.clearcachedproperty(self, b"_dirs")
         util.clearcachedproperty(self, b"_alldirs")
         util.clearcachedproperty(self, b"filefoldmap")
@@ -1636,7 +1639,10 @@
                     st[self._nodelen : 2 * self._nodelen],
                 )
             elif l == 0:
-                self._parents = (nullid, nullid)
+                self._parents = (
+                    self._nodeconstants.nullid,
+                    self._nodeconstants.nullid,
+                )
             else:
                 raise error.Abort(
                     _(b'working directory state appears damaged!')
@@ -1718,6 +1724,9 @@
         self.nonnormalset = nonnorm
         return otherparents
 
+    def non_normal_or_other_parent_paths(self):
+        return self.nonnormalset.union(self.otherparentset)
+
     @propertycache
     def identity(self):
         self._map
@@ -1741,6 +1750,7 @@
             self._opener = opener
             self._root = root
             self._filename = b'dirstate'
+            self._nodelen = 20
             self._parents = None
             self._dirtyparents = False
 
@@ -1765,25 +1775,6 @@
         def get(self, *args, **kwargs):
             return self._rustmap.get(*args, **kwargs)
 
-        @propertycache
-        def _rustmap(self):
-            """
-            Fills the Dirstatemap when called.
-            Use `self._inner_rustmap` if reading the dirstate is not necessary.
-            """
-            self._rustmap = self._inner_rustmap
-            self.read()
-            return self._rustmap
-
-        @propertycache
-        def _inner_rustmap(self):
-            """
-            Does not fill the Dirstatemap when called. This allows for
-            optimizations where only setting/getting the parents is needed.
-            """
-            self._inner_rustmap = rustmod.DirstateMap(self._root)
-            return self._inner_rustmap
-
         @property
         def copymap(self):
             return self._rustmap.copymap()
@@ -1793,8 +1784,9 @@
 
         def clear(self):
             self._rustmap.clear()
-            self._inner_rustmap.clear()
-            self.setparents(nullid, nullid)
+            self.setparents(
+                self._nodeconstants.nullid, self._nodeconstants.nullid
+            )
             util.clearcachedproperty(self, b"_dirs")
             util.clearcachedproperty(self, b"_alldirs")
             util.clearcachedproperty(self, b"dirfoldmap")
@@ -1833,7 +1825,6 @@
             return fp
 
         def setparents(self, p1, p2):
-            self._rustmap.setparents(p1, p2)
             self._parents = (p1, p2)
             self._dirtyparents = True
 
@@ -1849,16 +1840,29 @@
                     # File doesn't exist, so the current state is empty
                     st = b''
 
-                try:
-                    self._parents = self._inner_rustmap.parents(st)
-                except ValueError:
+                l = len(st)
+                if l == self._nodelen * 2:
+                    self._parents = (
+                        st[: self._nodelen],
+                        st[self._nodelen : 2 * self._nodelen],
+                    )
+                elif l == 0:
+                    self._parents = (
+                        self._nodeconstants.nullid,
+                        self._nodeconstants.nullid,
+                    )
+                else:
                     raise error.Abort(
                         _(b'working directory state appears damaged!')
                     )
 
             return self._parents
 
-        def read(self):
+        @propertycache
+        def _rustmap(self):
+            """
+            Fills the Dirstatemap when called.
+            """
             # ignore HG_PENDING because identity is used only for writing
             self.identity = util.filestat.frompath(
                 self._opener.join(self._filename)
@@ -1873,18 +1877,24 @@
             except IOError as err:
                 if err.errno != errno.ENOENT:
                     raise
-                return
-            if not st:
-                return
+                st = b''
 
-            parse_dirstate = util.nogc(self._rustmap.read)
-            parents = parse_dirstate(st)
+            use_dirstate_tree = self._ui.configbool(
+                b"experimental",
+                b"dirstate-tree.in-memory",
+                False,
+            )
+            self._rustmap, parents = rustmod.DirstateMap.new(
+                use_dirstate_tree, st
+            )
+
             if parents and not self._dirtyparents:
                 self.setparents(*parents)
 
             self.__contains__ = self._rustmap.__contains__
             self.__getitem__ = self._rustmap.__getitem__
             self.get = self._rustmap.get
+            return self._rustmap
 
         def write(self, st, now):
             parents = self.parents()
@@ -1930,6 +1940,9 @@
             otherparents = self._rustmap.other_parent_entries()
             return otherparents
 
+        def non_normal_or_other_parent_paths(self):
+            return self._rustmap.non_normal_or_other_parent_paths()
+
         @propertycache
         def dirfoldmap(self):
             f = {}
--- a/mercurial/discovery.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/discovery.py	Mon May 17 15:05:24 2021 +0200
@@ -12,7 +12,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     short,
 )
 
@@ -107,7 +106,7 @@
         if missingroots:
             discbases = []
             for n in missingroots:
-                discbases.extend([p for p in cl.parents(n) if p != nullid])
+                discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
@@ -116,7 +115,7 @@
             ancestorsof = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
-            commonheads = [nullid]
+            commonheads = [repo.nullid]
         self.commonheads = commonheads
         self.ancestorsof = ancestorsof
         self._revlog = cl
@@ -381,7 +380,7 @@
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
-    if remoteheads == [nullid]:
+    if remoteheads == [repo.nullid]:
         # remote is empty, nothing to check.
         return
 
--- a/mercurial/dispatch.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/dispatch.py	Mon May 17 15:05:24 2021 +0200
@@ -1064,6 +1064,16 @@
     if req.earlyoptions[b'profile']:
         for ui_ in uis:
             ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
+    elif req.earlyoptions[b'profile'] is False:
+        # Check for it being set already, so that we don't pollute the config
+        # with this when using chg in the very common case that it's not
+        # enabled.
+        if lui.configbool(b'profiling', b'enabled'):
+            # Only do this on lui so that `chg foo` with a user config setting
+            # profiling.enabled=1 still shows profiling information (chg will
+            # specify `--no-profile` when `hg serve` is starting up, we don't
+            # want that to propagate to every later invocation).
+            lui.setconfig(b'profiling', b'enabled', b'false', b'--no-profile')
 
     profile = lui.configbool(b'profiling', b'enabled')
     with profiling.profile(lui, enabled=profile) as profiler:
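
``profiling.enabled`` set from a user config is equivalent to passing
``--profile`` on every invocation, which is exactly why chg's server startup
passes an explicit ``--no-profile`` that must not leak into later commands.
Illustrative hgrc::

    [profiling]
    enabled = true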
--- a/mercurial/exchange.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/exchange.py	Mon May 17 15:05:24 2021 +0200
@@ -13,7 +13,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
 )
 from . import (
@@ -44,6 +43,7 @@
     stringutil,
     urlutil,
 )
+from .interfaces import repository
 
 urlerr = util.urlerr
 urlreq = util.urlreq
@@ -164,7 +164,7 @@
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
-        common = [nullid]
+        common = [repo.nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(repo, common, heads)
@@ -894,7 +894,7 @@
         cgpart.addparam(b'version', version)
     if scmutil.istreemanifest(pushop.repo):
         cgpart.addparam(b'treemanifest', b'1')
-    if b'exp-sidedata-flag' in pushop.repo.requirements:
+    if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
         cgpart.addparam(b'exp-sidedata', b'1')
 
     def handlereply(op):
@@ -1839,7 +1839,7 @@
     if (
         pullop.remote.capable(b'clonebundles')
         and pullop.heads is None
-        and list(pullop.common) == [nullid]
+        and list(pullop.common) == [pullop.repo.nullid]
     ):
         kwargs[b'cbattempted'] = pullop.clonebundleattempted
 
@@ -1849,7 +1849,7 @@
         pullop.repo.ui.status(_(b"no changes found\n"))
         pullop.cgresult = 0
     else:
-        if pullop.heads is None and list(pullop.common) == [nullid]:
+        if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
             pullop.repo.ui.status(_(b"requesting all changes\n"))
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
@@ -1920,7 +1920,7 @@
         pullop.cgresult = 0
         return
     tr = pullop.gettransaction()
-    if pullop.heads is None and list(pullop.common) == [nullid]:
+    if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
         pullop.repo.ui.status(_(b"requesting all changes\n"))
     elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
         # issue1320, avoid a race if remote changed after discovery
@@ -2428,7 +2428,7 @@
     if scmutil.istreemanifest(repo):
         part.addparam(b'treemanifest', b'1')
 
-    if b'exp-sidedata-flag' in repo.requirements:
+    if repository.REPO_FEATURE_SIDE_DATA in repo.features:
         part.addparam(b'exp-sidedata', b'1')
         sidedata = bundle2.format_remote_wanted_sidedata(repo)
         part.addparam(b'exp-wanted-sidedata', sidedata)
--- a/mercurial/exchangev2.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/exchangev2.py	Mon May 17 15:05:24 2021 +0200
@@ -11,10 +11,7 @@
 import weakref
 
 from .i18n import _
-from .node import (
-    nullid,
-    short,
-)
+from .node import short
 from . import (
     bookmarks,
     error,
@@ -304,7 +301,7 @@
         if set(remoteheads).issubset(common):
             fetch = []
 
-    common.discard(nullid)
+    common.discard(repo.nullid)
 
     return common, fetch, remoteheads
 
@@ -413,7 +410,7 @@
                 # Linknode is always itself for changesets.
                 cset[b'node'],
                 # We always send full revisions. So delta base is not set.
-                nullid,
+                repo.nullid,
                 mdiff.trivialdiffheader(len(data)) + data,
                 # Flags not yet supported.
                 0,
@@ -478,7 +475,7 @@
                 basenode = manifest[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
-                basenode = nullid
+                basenode = repo.nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
@@ -610,7 +607,7 @@
                 basenode = filerevision[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
-                basenode = nullid
+                basenode = repo.nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
@@ -705,7 +702,7 @@
                 basenode = filerevision[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
-                basenode = nullid
+                basenode = repo.nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
--- a/mercurial/filelog.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/filelog.py	Mon May 17 15:05:24 2021 +0200
@@ -8,10 +8,7 @@
 from __future__ import absolute_import
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from . import (
     error,
     revlog,
@@ -21,18 +18,24 @@
     util as interfaceutil,
 )
 from .utils import storageutil
+from .revlogutils import (
+    constants as revlog_constants,
+)
 
 
 @interfaceutil.implementer(repository.ifilestorage)
 class filelog(object):
     def __init__(self, opener, path):
         self._revlog = revlog.revlog(
-            opener, b'/'.join((b'data', path + b'.i')), censorable=True
+            opener,
+            # XXX should use the unencoded path
+            target=(revlog_constants.KIND_FILELOG, path),
+            radix=b'/'.join((b'data', path)),
+            censorable=True,
         )
         # Full name of the user visible file, relative to the repository root.
         # Used by LFS.
         self._revlog.filename = path
-        self._revlog.revlog_kind = b'filelog'
         self.nullid = self._revlog.nullid
 
     def __len__(self):
@@ -42,7 +45,7 @@
         return self._revlog.__iter__()
 
     def hasnode(self, node):
-        if node in (nullid, nullrev):
+        if node in (self.nullid, nullrev):
             return False
 
         try:
@@ -68,7 +71,7 @@
 
     def lookup(self, node):
         return storageutil.fileidlookup(
-            self._revlog, node, self._revlog.indexfile
+            self._revlog, node, self._revlog.display_id
         )
 
     def linkrev(self, rev):
@@ -225,18 +228,6 @@
             storedsize=storedsize,
         )
 
-    # TODO these aren't part of the interface and aren't internal methods.
-    # Callers should be fixed to not use them.
-
-    # Used by bundlefilelog, unionfilelog.
-    @property
-    def indexfile(self):
-        return self._revlog.indexfile
-
-    @indexfile.setter
-    def indexfile(self, value):
-        self._revlog.indexfile = value
-
     # Used by repo upgrade.
     def clone(self, tr, destrevlog, **kwargs):
         if not isinstance(destrevlog, filelog):
--- a/mercurial/filemerge.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/filemerge.py	Mon May 17 15:05:24 2021 +0200
@@ -15,7 +15,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     short,
 )
 from .pycompat import (
@@ -111,7 +110,7 @@
         return None
 
     def filenode(self):
-        return nullid
+        return self._ctx.repo().nullid
 
     _customcmp = True
 
--- a/mercurial/help.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/help.py	Mon May 17 15:05:24 2021 +0200
@@ -540,6 +540,12 @@
             TOPIC_CATEGORY_CONCEPTS,
         ),
         (
+            [b"evolution"],
+            _(b"Safely rewriting history (EXPERIMENTAL)"),
+            loaddoc(b'evolution'),
+            TOPIC_CATEGORY_CONCEPTS,
+        ),
+        (
             [b'scripting'],
             _(b'Using Mercurial from scripts and automation'),
             loaddoc(b'scripting'),
--- a/mercurial/helptext/config.txt	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/helptext/config.txt	Mon May 17 15:05:24 2021 +0200
@@ -5,7 +5,7 @@
 ===============
 
 If you're having problems with your configuration,
-:hg:`config --debug` can help you understand what is introducing
+:hg:`config --source` can help you understand what is introducing
 a setting into your environment.
 
 See :hg:`help config.syntax` and :hg:`help config.files`
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/evolution.txt	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,56 @@
+Obsolescence markers make it possible to mark changesets that have been
+deleted or superseded by a newer version of the changeset.
+
+Unlike the previous way of handling such changes, by stripping the old
+changesets from the repository, obsolescence markers can be propagated
+between repositories. This allows for a safe and simple way of exchanging
+mutable history and altering it after the fact. Changeset phases are
+respected, such that only draft and secret changesets can be altered (see
+:hg:`help phases` for details).
+
+Obsolescence is tracked using "obsolescence markers", pieces of metadata
+that record which changesets have been made obsolete, the potential
+successors of a given changeset, the moment the changeset was marked as
+obsolete, and the user who performed the rewriting operation. The markers
+are stored separately from standard changeset data and can be exchanged
+without any of the precursor changesets, preventing unnecessary exchange
+of obsolescence data.
+
+The complete set of obsolescence markers describes a history of changeset
+modifications that is orthogonal to the repository history of file
+modifications. This changeset history allows for detection and automatic
+resolution of edge cases arising from multiple users rewriting the same part
+of history concurrently.
+
+Current feature status
+======================
+
+This feature is still in development.
+
+Instability
+===========
+
+Rewriting changesets might introduce instability.
+
+There are two main kinds of instability: orphaning and diverging.
+
+Orphans are changesets left behind when their ancestors are rewritten.
+Divergence has two variants:
+
+* Content-divergence occurs when independent rewrites of the same changesets
+  lead to different results.
+
+* Phase-divergence occurs when the old (obsolete) version of a changeset
+  becomes public.
+
+It is possible to prevent local creation of orphans by using the following config::
+
+    [experimental]
+    evolution.createmarkers = true
+    evolution.exchange = true
+
+You can instead allow unstable changesets by enabling ``evolution.allowunstable`` explicitly::
+
+    [experimental]
+    evolution.createmarkers = true
+    evolution.exchange = true
+    evolution.allowunstable = true
--- a/mercurial/helptext/internals/changegroups.txt	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/helptext/internals/changegroups.txt	Mon May 17 15:05:24 2021 +0200
@@ -2,12 +2,13 @@
 the changelog data, root/flat manifest data, treemanifest data, and
 filelogs.
 
-There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
+There are 4 versions of changegroups: ``1``, ``2``, ``3``, and ``4``. From a
 high-level, versions ``1`` and ``2`` are almost exactly the same, with the
 only difference being an additional item in the *delta header*. Version
 ``3`` adds support for storage flags in the *delta header* and optionally
 exchanging treemanifests (enabled by setting an option on the
-``changegroup`` part in the bundle2).
+``changegroup`` part in the bundle2). Version ``4`` adds support for exchanging
+sidedata (additional revision metadata not part of the digest).
 
 Changegroups when not exchanging treemanifests consist of 3 logical
 segments::
@@ -74,8 +75,8 @@
 entry (either that the recipient already has, or previously specified in the
 bundle/changegroup).
 
-The *delta header* is different between versions ``1``, ``2``, and
-``3`` of the changegroup format.
+The *delta header* is different between versions ``1``, ``2``, ``3``, and
+``4`` of the changegroup format.
 
 Version 1 (headerlen=80)::
 
@@ -104,6 +105,15 @@
    |            |             |             |            |            |           |
    +------------------------------------------------------------------------------+
 
+Version 4 (headerlen=103)::
+
+   +------------------------------------------------------------------------------+----------+
+   |            |             |             |            |            |           |          |
+   |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |  pflags  |
+   | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | (1 byte) |
+   |            |             |             |            |            |           |          |
+   +------------------------------------------------------------------------------+----------+
+
 The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain a
 series of *delta*s, densely packed (no separators). These deltas describe a diff
 from an existing entry (either that the recipient already has, or previously
@@ -140,12 +150,24 @@
    Externally stored. The revision fulltext contains ``key:value`` ``\n``
    delimited metadata defining an object stored elsewhere. Used by the LFS
    extension.
+4096
+   Contains copy information. This revision changes files in a way that could
+   affect copy tracing. This does *not* affect changegroup handling, but is
+   relevant for other parts of Mercurial.
 
 For historical reasons, the integer values are identical to revlog version 1
 per-revision storage flags and correspond to bits being set in this 2-byte
 field. Bits were allocated starting from the most-significant bit, hence the
 reverse ordering and allocation of these flags.
 
+The *pflags* (protocol flags) field holds bitwise flags affecting the protocol
+itself. They are first in the header since they may affect the handling of the
+rest of the fields in a future version. They are defined as follows:
+
+1 indicates that a chunk of sidedata (of variable length) must be read right
+  after the revision flags.
+
+
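(A minimal sketch, not part of the patch: a reader for this header could look
like the following, assuming the protocol flags byte leads the header as the
paragraph above states.)

    import struct

    # 1-byte pflags + five 20-byte nodes + 2-byte storage flags = 103 bytes
    CG4_DELTA_HEADER = struct.Struct(">B20s20s20s20s20sH")

    def parse_cg4_delta_header(data):
        (pflags, node, p1, p2, base,
         link, flags) = CG4_DELTA_HEADER.unpack_from(data)
        # pflags & 1 means a variable-length sidedata chunk follows
        return pflags, node, p1, p2, base, link, flags
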
 Changeset Segment
 =================
 
@@ -166,9 +188,9 @@
 Treemanifests Segment
 ---------------------
 
-The *treemanifests segment* only exists in changegroup version ``3``, and
-only if the 'treemanifest' param is part of the bundle2 changegroup part
-(it is not possible to use changegroup version 3 outside of bundle2).
+The *treemanifests segment* only exists in changegroup version ``3`` and ``4``,
+and only if the 'treemanifest' param is part of the bundle2 changegroup part
+(it is not possible to use changegroup version 3 or 4 outside of bundle2).
 Aside from the filenames in the *treemanifests segment* containing a
 trailing ``/`` character, it behaves identically to the *filelogs segment*
 (see below). The final sub-segment is followed by an *empty chunk* (logically,
--- a/mercurial/hg.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/hg.py	Mon May 17 15:05:24 2021 +0200
@@ -16,8 +16,7 @@
 from .i18n import _
 from .node import (
     hex,
-    nullhex,
-    nullid,
+    sha1nodeconstants,
     short,
 )
 from .pycompat import getattr
@@ -772,7 +771,7 @@
                             },
                         ).result()
 
-                    if rootnode != nullid:
+                    if rootnode != sha1nodeconstants.nullid:
                         sharepath = os.path.join(sharepool, hex(rootnode))
                     else:
                         ui.status(
@@ -883,7 +882,9 @@
             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(srcrepo, peeropts, dest)
-            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
+            srcrepo.hook(
+                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
+            )
         else:
             try:
                 # only pass ui when no srcrepo
@@ -1329,7 +1330,9 @@
         for n in chlist:
             if limit is not None and count >= limit:
                 break
-            parents = [p for p in other.changelog.parents(n) if p != nullid]
+            parents = [
+                p for p in other.changelog.parents(n) if p != repo.nullid
+            ]
             if opts.get(b'no_merges') and len(parents) == 2:
                 continue
             count += 1
@@ -1406,7 +1409,7 @@
     for n in revs:
         if limit is not None and count >= limit:
             break
-        parents = [p for p in cl.parents(n) if p != nullid]
+        parents = [p for p in cl.parents(n) if p != repo.nullid]
         if no_merges and len(parents) == 2:
             continue
         count += 1
--- a/mercurial/hgweb/server.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/hgweb/server.py	Mon May 17 15:05:24 2021 +0200
@@ -344,7 +344,7 @@
 try:
     import threading
 
-    threading.activeCount()  # silence pyflakes and bypass demandimport
+    threading.active_count()  # silence pyflakes and bypass demandimport
     _mixin = socketserver.ThreadingMixIn
 except ImportError:
     if util.safehasattr(os, b"fork"):
--- a/mercurial/hgweb/webutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/hgweb/webutil.py	Mon May 17 15:05:24 2021 +0200
@@ -14,7 +14,7 @@
 import re
 
 from ..i18n import _
-from ..node import hex, nullid, short
+from ..node import hex, short
 from ..pycompat import setattr
 
 from .common import (
@@ -220,7 +220,7 @@
 def _siblings(siblings=None, hiderev=None):
     if siblings is None:
         siblings = []
-    siblings = [s for s in siblings if s.node() != nullid]
+    siblings = [s for s in siblings if s.node() != s.repo().nullid]
     if len(siblings) == 1 and siblings[0].rev() == hiderev:
         siblings = []
     return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
@@ -316,12 +316,16 @@
         yield {name: t}
 
 
-def showtag(repo, t1, node=nullid):
+def showtag(repo, t1, node=None):
+    if node is None:
+        node = repo.nullid
     args = (repo.nodetags, node, b'tag')
     return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
 
 
-def showbookmark(repo, t1, node=nullid):
+def showbookmark(repo, t1, node=None):
+    if node is None:
+        node = repo.nullid
     args = (repo.nodebookmarks, node, b'bookmark')
     return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
 
--- a/mercurial/interfaces/dirstate.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/interfaces/dirstate.py	Mon May 17 15:05:24 2021 +0200
@@ -2,8 +2,6 @@
 
 import contextlib
 
-from .. import node as nodemod
-
 from . import util as interfaceutil
 
 
@@ -97,7 +95,7 @@
     def branch():
         pass
 
-    def setparents(p1, p2=nodemod.nullid):
+    def setparents(p1, p2=None):
         """Set dirstate parents to p1 and p2.
 
         When moving from two parents to one, 'm' merged entries are
--- a/mercurial/interfaces/repository.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/interfaces/repository.py	Mon May 17 15:05:24 2021 +0200
@@ -21,20 +21,20 @@
 REPO_FEATURE_LFS = b'lfs'
 # Repository supports being stream cloned.
 REPO_FEATURE_STREAM_CLONE = b'streamclone'
+# Repository supports (at least) some sidedata to be stored
+REPO_FEATURE_SIDE_DATA = b'side-data'
 # Files storage may lack data for all ancestors.
 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
 
 REVISION_FLAG_CENSORED = 1 << 15
 REVISION_FLAG_ELLIPSIS = 1 << 14
 REVISION_FLAG_EXTSTORED = 1 << 13
-REVISION_FLAG_SIDEDATA = 1 << 12
-REVISION_FLAG_HASCOPIESINFO = 1 << 11
+REVISION_FLAG_HASCOPIESINFO = 1 << 12
 
 REVISION_FLAGS_KNOWN = (
     REVISION_FLAG_CENSORED
     | REVISION_FLAG_ELLIPSIS
     | REVISION_FLAG_EXTSTORED
-    | REVISION_FLAG_SIDEDATA
     | REVISION_FLAG_HASCOPIESINFO
 )
 
@@ -457,6 +457,13 @@
         """Raw sidedata bytes for the given revision."""
     )
 
+    protocol_flags = interfaceutil.Attribute(
+        """Single byte of integer flags that can influence the protocol.
+
+        This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
+        """
+    )
+
 
 class ifilerevisionssequence(interfaceutil.Interface):
     """Contains index data for all revisions of a file.
@@ -1162,13 +1169,6 @@
         """An ``ifilerevisionssequence`` instance."""
     )
 
-    indexfile = interfaceutil.Attribute(
-        """Path of revlog index file.
-
-        TODO this is revlog specific and should not be exposed.
-        """
-    )
-
     opener = interfaceutil.Attribute(
         """VFS opener to use to access underlying files used for storage.
 
@@ -1176,13 +1176,6 @@
         """
     )
 
-    version = interfaceutil.Attribute(
-        """Revlog version number.
-
-        TODO this is revlog specific and should not be exposed.
-        """
-    )
-
     _generaldelta = interfaceutil.Attribute(
         """Whether generaldelta storage is being used.
 
@@ -1851,7 +1844,9 @@
     def savecommitmessage(text):
         pass
 
-    def register_sidedata_computer(kind, category, keys, computer):
+    def register_sidedata_computer(
+        kind, category, keys, computer, flags, replace=False
+    ):
         pass
 
     def register_wanted_sidedata(category):
--- a/mercurial/localrepo.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/localrepo.py	Mon May 17 15:05:24 2021 +0200
@@ -19,7 +19,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     sha1nodeconstants,
     short,
@@ -50,7 +49,6 @@
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
-    metadata as metadatamod,
     namespaces,
     narrowspec,
     obsolete,
@@ -91,6 +89,7 @@
 from .revlogutils import (
     concurrency_checker as revlogchecker,
     constants as revlogconst,
+    sidedata as sidedatamod,
 )
 
 release = lockmod.release
@@ -738,6 +737,9 @@
     storevfs = store.vfs
     storevfs.options = resolvestorevfsoptions(ui, requirements, features)
 
+    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
+        features.add(repository.REPO_FEATURE_SIDE_DATA)
+
     # The cache vfs is used to manage cache files.
     cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
     cachevfs.createmode = store.createmode
@@ -1064,9 +1066,6 @@
     if sparserevlog:
         options[b'generaldelta'] = True
 
-    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
-    options[b'side-data'] = sidedata
-
     maxchainlen = None
     if sparserevlog:
         maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
@@ -1219,7 +1218,6 @@
         requirementsmod.TREEMANIFEST_REQUIREMENT,
         requirementsmod.COPIESSDC_REQUIREMENT,
         requirementsmod.REVLOGV2_REQUIREMENT,
-        requirementsmod.SIDEDATA_REQUIREMENT,
         requirementsmod.SPARSEREVLOG_REQUIREMENT,
         requirementsmod.NODEMAP_REQUIREMENT,
         bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
@@ -1408,7 +1406,7 @@
 
         self._wanted_sidedata = set()
         self._sidedata_computers = {}
-        metadatamod.set_sidedata_spec_for_repo(self)
+        sidedatamod.set_sidedata_spec_for_repo(self)
 
     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
@@ -1702,7 +1700,7 @@
                     _(b"warning: ignoring unknown working parent %s!\n")
                     % short(node)
                 )
-            return nullid
+            return self.nullid
 
     @storecache(narrowspec.FILENAME)
     def narrowpats(self):
@@ -1753,9 +1751,9 @@
     @unfilteredpropertycache
     def _quick_access_changeid_null(self):
         return {
-            b'null': (nullrev, nullid),
-            nullrev: (nullrev, nullid),
-            nullid: (nullrev, nullid),
+            b'null': (nullrev, self.nodeconstants.nullid),
+            nullrev: (nullrev, self.nodeconstants.nullid),
+            self.nullid: (nullrev, self.nullid),
         }
 
     @unfilteredpropertycache
@@ -1765,7 +1763,7 @@
         quick = self._quick_access_changeid_null.copy()
         cl = self.unfiltered().changelog
         for node in self.dirstate.parents():
-            if node == nullid:
+            if node == self.nullid:
                 continue
             rev = cl.index.get_rev(node)
             if rev is None:
@@ -1785,7 +1783,7 @@
                 quick[r] = pair
                 quick[n] = pair
         p1node = self.dirstate.p1()
-        if p1node != nullid:
+        if p1node != self.nullid:
             quick[b'.'] = quick[p1node]
         return quick
 
@@ -1841,7 +1839,7 @@
                 # when we know that '.' won't be hidden
                 node = self.dirstate.p1()
                 rev = self.unfiltered().changelog.rev(node)
-            elif len(changeid) == 20:
+            elif len(changeid) == self.nodeconstants.nodelen:
                 try:
                     node = changeid
                     rev = self.changelog.rev(changeid)
@@ -1862,7 +1860,7 @@
                     changeid = hex(changeid)  # for the error message
                     raise
 
-            elif len(changeid) == 40:
+            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                 node = bin(changeid)
                 rev = self.changelog.rev(node)
             else:
@@ -2037,7 +2035,7 @@
         # local encoding.
         tags = {}
         for (name, (node, hist)) in pycompat.iteritems(alltags):
-            if node != nullid:
+            if node != self.nullid:
                 tags[encoding.tolocal(name)] = node
         tags[b'tip'] = self.changelog.tip()
         tagtypes = {
@@ -2161,7 +2159,9 @@
     def wjoin(self, f, *insidef):
         return self.vfs.reljoin(self.root, f, *insidef)
 
-    def setparents(self, p1, p2=nullid):
+    def setparents(self, p1, p2=None):
+        if p2 is None:
+            p2 = self.nullid
         self[None].setparents(p1, p2)
         self._quick_access_changeid_invalidate()
 
@@ -3100,7 +3100,7 @@
                 subrepoutil.writestate(self, newstate)
 
             p1, p2 = self.dirstate.parents()
-            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
+            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
             try:
                 self.hook(
                     b"precommit", throw=True, parent1=hookp1, parent2=hookp2
@@ -3273,7 +3273,7 @@
             t = n
             while True:
                 p = self.changelog.parents(n)
-                if p[1] != nullid or p[0] == nullid:
+                if p[1] != self.nullid or p[0] == self.nullid:
                     b.append((t, n, p[0], p[1]))
                     break
                 n = p[0]
@@ -3286,7 +3286,7 @@
             n, l, i = top, [], 0
             f = 1
 
-            while n != bottom and n != nullid:
+            while n != bottom and n != self.nullid:
                 p = self.changelog.parents(n)[0]
                 if i == f:
                     l.append(n)
@@ -3370,20 +3370,32 @@
         return self.pathto(fp.name[len(self.root) + 1 :])
 
     def register_wanted_sidedata(self, category):
+        if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
+            # Only revlogv2 repos can want sidedata.
+            return
         self._wanted_sidedata.add(pycompat.bytestr(category))
 
-    def register_sidedata_computer(self, kind, category, keys, computer):
-        if kind not in (b"changelog", b"manifest", b"filelog"):
+    def register_sidedata_computer(
+        self, kind, category, keys, computer, flags, replace=False
+    ):
+        if kind not in revlogconst.ALL_KINDS:
             msg = _(b"unexpected revlog kind '%s'.")
             raise error.ProgrammingError(msg % kind)
         category = pycompat.bytestr(category)
-        if category in self._sidedata_computers.get(kind, []):
+        already_registered = category in self._sidedata_computers.get(kind, [])
+        if already_registered and not replace:
             msg = _(
                 b"cannot register a sidedata computer twice for category '%s'."
             )
             raise error.ProgrammingError(msg % category)
+        if replace and not already_registered:
+            msg = _(
+                b"cannot replace a sidedata computer that isn't registered "
+                b"for category '%s'."
+            )
+            raise error.ProgrammingError(msg % category)
         self._sidedata_computers.setdefault(kind, {})
-        self._sidedata_computers[kind][category] = (keys, computer)
+        self._sidedata_computers[kind][category] = (keys, computer, flags)
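(A sketch for illustration, not part of the patch: a caller could use the
extended signature as below; the category name and computer are hypothetical,
and a computer is assumed to return its sidedata plus a
(flags_to_add, flags_to_remove) pair, matching the tuple stored above.)

    from mercurial.revlogutils import constants as revlog_constants

    def my_computer(repo, revlog, rev, existing_sidedata):
        sidedata = {}  # category -> raw bytes for this revision
        return sidedata, (0, 0)  # (flags_to_add, flags_to_remove)

    def install(repo):
        repo.register_sidedata_computer(
            revlog_constants.KIND_CHANGELOG,  # must be in ALL_KINDS
            b'my-category',                   # hypothetical category
            (b'my-category',),                # keys it can produce
            my_computer,
            0,                                # revision flags it implies
            replace=True,                     # override a prior computer
        )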
 
 
 # used to avoid circular references so destructors work
@@ -3507,16 +3519,10 @@
         if ui.configbool(b'format', b'sparse-revlog'):
             requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
 
-    # experimental config: format.exp-use-side-data
-    if ui.configbool(b'format', b'exp-use-side-data'):
-        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
-        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
-        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
     # experimental config: format.exp-use-copies-side-data-changeset
     if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
         requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
         requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
-        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
         requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
     if ui.configbool(b'experimental', b'treemanifest'):
         requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
@@ -3524,8 +3530,6 @@
     revlogv2 = ui.config(b'experimental', b'revlogv2')
     if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
         requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
-        # generaldelta is implied by revlogv2.
-        requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
         requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
     # experimental config: format.internal-phase
     if ui.configbool(b'format', b'internal-phase'):
--- a/mercurial/logcmdutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/logcmdutil.py	Mon May 17 15:05:24 2021 +0200
@@ -12,12 +12,7 @@
 import posixpath
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-    wdirid,
-    wdirrev,
-)
+from .node import nullrev, wdirrev
 
 from .thirdparty import attr
 
@@ -357,7 +352,7 @@
         if self.ui.debugflag:
             mnode = ctx.manifestnode()
             if mnode is None:
-                mnode = wdirid
+                mnode = self.repo.nodeconstants.wdirid
                 mrev = wdirrev
             else:
                 mrev = self.repo.manifestlog.rev(mnode)
@@ -505,7 +500,11 @@
         )
 
         if self.ui.debugflag or b'manifest' in datahint:
-            fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
+            fm.data(
+                manifest=fm.hexfunc(
+                    ctx.manifestnode() or self.repo.nodeconstants.wdirid
+                )
+            )
         if self.ui.debugflag or b'extra' in datahint:
             fm.data(extra=fm.formatdict(ctx.extra()))
 
@@ -991,7 +990,7 @@
     """Return the initial set of revisions to be filtered or followed"""
     if wopts.revspec:
         revs = scmutil.revrange(repo, wopts.revspec)
-    elif wopts.follow and repo.dirstate.p1() == nullid:
+    elif wopts.follow and repo.dirstate.p1() == repo.nullid:
         revs = smartset.baseset()
     elif wopts.follow:
         revs = repo.revs(b'.')
--- a/mercurial/manifest.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/manifest.py	Mon May 17 15:05:24 2021 +0200
@@ -16,7 +16,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from .pycompat import getattr
@@ -35,6 +34,9 @@
     repository,
     util as interfaceutil,
 )
+from .revlogutils import (
+    constants as revlog_constants,
+)
 
 parsers = policy.importmod('parsers')
 propertycache = util.propertycache
@@ -43,7 +45,7 @@
 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
 
 
-def _parse(data):
+def _parse(nodelen, data):
     # This method does a little bit of excessive-looking
     # precondition checking. This is so that the behavior of this
     # class exactly matches its C counterpart to try and help
@@ -64,7 +66,7 @@
             nl -= 1
         else:
             flags = b''
-        if nl not in (40, 64):
+        if nl != 2 * nodelen:
             raise ValueError(b'Invalid manifest line')
 
         yield f, bin(n), flags
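(A hedged illustration of the line format ``_parse`` consumes: path, NUL
byte, a hex node of exactly ``2 * nodelen`` characters, then optional flags.)

    NODELEN = 20  # SHA-1 repositories; the hex node is 2 * NODELEN chars
    line = b"foo/bar.py\x00" + b"ab" * NODELEN + b"x\n"  # trailing 'x' flag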
@@ -132,7 +134,7 @@
         else:
             hlen = nlpos - zeropos - 1
             flags = b''
-        if hlen not in (40, 64):
+        if hlen != 2 * self.lm._nodelen:
             raise error.StorageError(b'Invalid manifest line')
         hashval = unhexlify(
             data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
@@ -177,12 +179,14 @@
 
     def __init__(
         self,
+        nodelen,
         data,
         positions=None,
         extrainfo=None,
         extradata=None,
         hasremovals=False,
     ):
+        self._nodelen = nodelen
         if positions is None:
             self.positions = self.findlines(data)
             self.extrainfo = [0] * len(self.positions)
@@ -289,7 +293,7 @@
             hlen -= 1
         else:
             flags = b''
-        if hlen not in (40, 64):
+        if hlen != 2 * self._nodelen:
             raise error.StorageError(b'Invalid manifest line')
         hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
         return (hashval, flags)
@@ -345,6 +349,7 @@
     def copy(self):
         # XXX call _compact like in C?
         return _lazymanifest(
+            self._nodelen,
             self.data,
             self.positions,
             self.extrainfo,
@@ -455,7 +460,7 @@
 
     def filtercopy(self, filterfn):
         # XXX should be optimized
-        c = _lazymanifest(b'')
+        c = _lazymanifest(self._nodelen, b'')
         for f, n, fl in self.iterentries():
             if filterfn(f):
                 c[f] = n, fl
@@ -470,8 +475,9 @@
 
 @interfaceutil.implementer(repository.imanifestdict)
 class manifestdict(object):
-    def __init__(self, data=b''):
-        self._lm = _lazymanifest(data)
+    def __init__(self, nodelen, data=b''):
+        self._nodelen = nodelen
+        self._lm = _lazymanifest(nodelen, data)
 
     def __getitem__(self, key):
         return self._lm[key][0]
@@ -579,14 +585,14 @@
             return self.copy()
 
         if self._filesfastpath(match):
-            m = manifestdict()
+            m = manifestdict(self._nodelen)
             lm = self._lm
             for fn in match.files():
                 if fn in lm:
                     m._lm[fn] = lm[fn]
             return m
 
-        m = manifestdict()
+        m = manifestdict(self._nodelen)
         m._lm = self._lm.filtercopy(match)
         return m
 
@@ -629,7 +635,7 @@
             return b''
 
     def copy(self):
-        c = manifestdict()
+        c = manifestdict(self._nodelen)
         c._lm = self._lm.copy()
         return c
 
@@ -795,7 +801,8 @@
     def __init__(self, nodeconstants, dir=b'', text=b''):
         self._dir = dir
         self.nodeconstants = nodeconstants
-        self._node = nullid
+        self._node = self.nodeconstants.nullid
+        self._nodelen = self.nodeconstants.nodelen
         self._loadfunc = _noop
         self._copyfunc = _noop
         self._dirty = False
@@ -1323,7 +1330,7 @@
 
     def parse(self, text, readsubtree):
         selflazy = self._lazydirs
-        for f, n, fl in _parse(text):
+        for f, n, fl in _parse(self._nodelen, text):
             if fl == b't':
                 f = f + b'/'
                 # False below means "doesn't need to be copied" and can use the
@@ -1391,7 +1398,7 @@
                 continue
             subp1 = getnode(m1, d)
             subp2 = getnode(m2, d)
-            if subp1 == nullid:
+            if subp1 == self.nodeconstants.nullid:
                 subp1, subp2 = subp2, subp1
             writesubtree(subm, subp1, subp2, match)
 
@@ -1560,7 +1567,6 @@
         opener,
         tree=b'',
         dirlogcache=None,
-        indexfile=None,
         treemanifest=False,
     ):
         """Constructs a new manifest revlog
@@ -1591,10 +1597,9 @@
         if tree:
             assert self._treeondisk, b'opts is %r' % opts
 
-        if indexfile is None:
-            indexfile = b'00manifest.i'
-            if tree:
-                indexfile = b"meta/" + tree + indexfile
+        radix = b'00manifest'
+        if tree:
+            radix = b"meta/" + tree + radix
 
         self.tree = tree
 
@@ -1606,7 +1611,8 @@
 
         self._revlog = revlog.revlog(
             opener,
-            indexfile,
+            target=(revlog_constants.KIND_MANIFESTLOG, self.tree),
+            radix=radix,
             # only root indexfile is cached
             checkambig=not bool(tree),
             mmaplargeindex=True,
@@ -1615,9 +1621,7 @@
         )
 
         self.index = self._revlog.index
-        self.version = self._revlog.version
         self._generaldelta = self._revlog._generaldelta
-        self._revlog.revlog_kind = b'manifest'
 
     def _setupmanifestcachehooks(self, repo):
         """Persist the manifestfulltextcache on lock release"""
@@ -1901,14 +1905,6 @@
         )
 
     @property
-    def indexfile(self):
-        return self._revlog.indexfile
-
-    @indexfile.setter
-    def indexfile(self, value):
-        self._revlog.indexfile = value
-
-    @property
     def opener(self):
         return self._revlog.opener
 
@@ -1994,7 +1990,7 @@
             else:
                 m = manifestctx(self, node)
 
-        if node != nullid:
+        if node != self.nodeconstants.nullid:
             mancache = self._dirmancache.get(tree)
             if not mancache:
                 mancache = util.lrucachedict(self._cachesize)
@@ -2020,7 +2016,7 @@
 class memmanifestctx(object):
     def __init__(self, manifestlog):
         self._manifestlog = manifestlog
-        self._manifestdict = manifestdict()
+        self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
 
     def _storage(self):
         return self._manifestlog.getstorage(b'')
@@ -2082,8 +2078,9 @@
 
     def read(self):
         if self._data is None:
-            if self._node == nullid:
-                self._data = manifestdict()
+            nc = self._manifestlog.nodeconstants
+            if self._node == nc.nullid:
+                self._data = manifestdict(nc.nodelen)
             else:
                 store = self._storage()
                 if self._node in store.fulltextcache:
@@ -2092,7 +2089,7 @@
                     text = store.revision(self._node)
                     arraytext = bytearray(text)
                     store.fulltextcache[self._node] = arraytext
-                self._data = manifestdict(text)
+                self._data = manifestdict(nc.nodelen, text)
         return self._data
 
     def readfast(self, shallow=False):
@@ -2119,7 +2116,7 @@
         store = self._storage()
         r = store.rev(self._node)
         d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
-        return manifestdict(d)
+        return manifestdict(store.nodeconstants.nodelen, d)
 
     def find(self, key):
         return self.read().find(key)
@@ -2188,7 +2185,7 @@
     def read(self):
         if self._data is None:
             store = self._storage()
-            if self._node == nullid:
+            if self._node == self._manifestlog.nodeconstants.nullid:
                 self._data = treemanifest(self._manifestlog.nodeconstants)
             # TODO accessing non-public API
             elif store._treeondisk:
@@ -2245,7 +2242,7 @@
         if shallow:
             r = store.rev(self._node)
             d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
-            return manifestdict(d)
+            return manifestdict(store.nodeconstants.nodelen, d)
         else:
             # Need to perform a slow delta
             r0 = store.deltaparent(store.rev(self._node))
@@ -2274,7 +2271,9 @@
             return self.readdelta(shallow=shallow)
 
         if shallow:
-            return manifestdict(store.revision(self._node))
+            return manifestdict(
+                store.nodeconstants.nodelen, store.revision(self._node)
+            )
         else:
             return self.read()
 
--- a/mercurial/merge.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/merge.py	Mon May 17 15:05:24 2021 +0200
@@ -13,12 +13,7 @@
 import struct
 
 from .i18n import _
-from .node import (
-    addednodeid,
-    modifiednodeid,
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from .thirdparty import attr
 from .utils import stringutil
 from . import (
@@ -779,7 +774,7 @@
         # to flag the change. If wctx is a committed revision, we shouldn't
         # care for the dirty state of the working directory.
         if any(wctx.sub(s).dirty() for s in wctx.substate):
-            m1[b'.hgsubstate'] = modifiednodeid
+            m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
 
     # Don't use m2-vs-ma optimization if:
     # - ma is the same as m1 or m2, which we're just going to diff again later
@@ -944,7 +939,7 @@
                             mresult.addcommitinfo(
                                 f, b'merge-removal-candidate', b'yes'
                             )
-                elif n1 == addednodeid:
+                elif n1 == repo.nodeconstants.addednodeid:
                     # This file was locally added. We should forget it instead of
                     # deleting it.
                     mresult.addfile(
@@ -1785,7 +1780,7 @@
     if (
         fsmonitorwarning
         and not fsmonitorenabled
-        and p1node == nullid
+        and p1node == repo.nullid
         and num_gets >= fsmonitorthreshold
         and pycompat.sysplatform.startswith((b'linux', b'darwin'))
     ):
@@ -1913,7 +1908,7 @@
         else:
             if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
-                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
+                pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
             else:
                 pas = [p1.ancestor(p2, warn=branchmerge)]
 
@@ -2112,7 +2107,7 @@
 
         ### apply phase
         if not branchmerge:  # just jump to the new rev
-            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
+            fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
         # If we're doing a partial update, we need to skip updating
         # the dirstate.
         always = matcher is None or matcher.always()
@@ -2281,14 +2276,14 @@
     if keepconflictparent and stats.unresolvedcount:
         pother = ctx.node()
     else:
-        pother = nullid
+        pother = repo.nullid
         parents = ctx.parents()
         if keepparent and len(parents) == 2 and base in parents:
             parents.remove(base)
             pother = parents[0].node()
     # Never set both parents equal to each other
     if pother == pctx.node():
-        pother = nullid
+        pother = repo.nullid
 
     if wctx.isinmemory():
         wctx.setparents(pctx.node(), pother)
--- a/mercurial/mergestate.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/mergestate.py	Mon May 17 15:05:24 2021 +0200
@@ -9,7 +9,6 @@
 from .node import (
     bin,
     hex,
-    nullhex,
     nullrev,
 )
 from . import (
@@ -32,7 +31,7 @@
 
 
 def _filectxorabsent(hexnode, ctx, f):
-    if hexnode == nullhex:
+    if hexnode == ctx.repo().nodeconstants.nullhex:
         return filemerge.absentfilectx(ctx, f)
     else:
         return ctx[f]
@@ -248,7 +247,7 @@
         note: also write the local version to the `.hg/merge` directory.
         """
         if fcl.isabsent():
-            localkey = nullhex
+            localkey = self._repo.nodeconstants.nullhex
         else:
             localkey = mergestate.getlocalkey(fcl.path())
             self._make_backup(fcl, localkey)
@@ -354,7 +353,7 @@
                 flags = flo
         if preresolve:
             # restore local
-            if localkey != nullhex:
+            if localkey != self._repo.nodeconstants.nullhex:
                 self._restore_backup(wctx[dfile], localkey, flags)
             else:
                 wctx[dfile].remove(ignoremissing=True)
@@ -658,7 +657,10 @@
                 records.append(
                     (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                 )
-            elif v[1] == nullhex or v[6] == nullhex:
+            elif (
+                v[1] == self._repo.nodeconstants.nullhex
+                or v[6] == self._repo.nodeconstants.nullhex
+            ):
                 # Change/Delete or Delete/Change conflicts. These are stored in
                 # 'C' records. v[1] is the local file, and is nullhex when the
                 # file is deleted locally ('dc'). v[6] is the remote file, and
--- a/mercurial/metadata.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/metadata.py	Mon May 17 15:05:24 2021 +0200
@@ -11,14 +11,9 @@
 import multiprocessing
 import struct
 
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from . import (
     error,
-    pycompat,
-    requirements as requirementsmod,
     util,
 )
 
@@ -617,7 +612,7 @@
         if f in ctx:
             fctx = ctx[f]
             parents = fctx._filelog.parents(fctx._filenode)
-            if parents[1] != nullid:
+            if parents[1] != ctx.repo().nullid:
                 merged.append(f)
     return merged
 
@@ -822,26 +817,9 @@
 
 
 def copies_sidedata_computer(repo, revlog, rev, existing_sidedata):
-    return _getsidedata(repo, rev)[0]
-
-
-def set_sidedata_spec_for_repo(repo):
-    if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
-        repo.register_wanted_sidedata(sidedatamod.SD_FILES)
-        repo.register_sidedata_computer(
-            b"changelog",
-            sidedatamod.SD_FILES,
-            (sidedatamod.SD_FILES,),
-            copies_sidedata_computer,
-        )
-
-
-def getsidedataadder(srcrepo, destrepo):
-    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
-    if pycompat.iswindows or not use_w:
-        return _get_simple_sidedata_adder(srcrepo, destrepo)
-    else:
-        return _get_worker_sidedata_adder(srcrepo, destrepo)
+    sidedata, has_copies_info = _getsidedata(repo, rev)
+    flags_to_add = sidedataflag.REVIDX_HASCOPIESINFO if has_copies_info else 0
+    return sidedata, (flags_to_add, 0)
 
 
 def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
@@ -910,57 +888,21 @@
     # received, we shelve 43 for later use.
     staging = {}
 
-    def sidedata_companion(revlog, rev):
-        data = {}, False
-        if util.safehasattr(revlog, b'filteredrevs'):  # this is a changelog
-            # Is the data previously shelved ?
-            data = staging.pop(rev, None)
-            if data is None:
-                # look at the queued result until we find the one we are lookig
-                # for (shelve the other ones)
+    def sidedata_companion(repo, revlog, rev, old_sidedata):
+        # Is the data previously shelved?
+        data = staging.pop(rev, None)
+        if data is None:
+            # look at the queued results until we find the one we are
+            # looking for (shelve the other ones)
+            r, data = sidedataq.get()
+            while r != rev:
+                staging[r] = data
                 r, data = sidedataq.get()
-                while r != rev:
-                    staging[r] = data
-                    r, data = sidedataq.get()
-            tokens.release()
+        tokens.release()
         sidedata, has_copies_info = data
         new_flag = 0
         if has_copies_info:
             new_flag = sidedataflag.REVIDX_HASCOPIESINFO
-        return False, (), sidedata, new_flag, 0
+        return sidedata, (new_flag, 0)
 
     return sidedata_companion
-
-
-def _get_simple_sidedata_adder(srcrepo, destrepo):
-    """The simple version of the sidedata computation
-
-    It just compute it in the same thread on request"""
-
-    def sidedatacompanion(revlog, rev):
-        sidedata, has_copies_info = {}, False
-        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
-            sidedata, has_copies_info = _getsidedata(srcrepo, rev)
-        new_flag = 0
-        if has_copies_info:
-            new_flag = sidedataflag.REVIDX_HASCOPIESINFO
-
-        return False, (), sidedata, new_flag, 0
-
-    return sidedatacompanion
-
-
-def getsidedataremover(srcrepo, destrepo):
-    def sidedatacompanion(revlog, rev):
-        f = ()
-        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
-            if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
-                f = (
-                    sidedatamod.SD_P1COPIES,
-                    sidedatamod.SD_P2COPIES,
-                    sidedatamod.SD_FILESADDED,
-                    sidedatamod.SD_FILESREMOVED,
-                )
-        return False, f, {}, 0, sidedataflag.REVIDX_HASCOPIESINFO
-
-    return sidedatacompanion
--- a/mercurial/obsolete.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/obsolete.py	Mon May 17 15:05:24 2021 +0200
@@ -73,11 +73,10 @@
 import struct
 
 from .i18n import _
+from .node import (
+    bin,
+    hex,
+)
 from .pycompat import getattr
-from .node import (
-    bin,
-    hex,
-    nullid,
-)
 from . import (
     encoding,
@@ -103,6 +106,7 @@
 # Options for obsolescence
 createmarkersopt = b'createmarkers'
 allowunstableopt = b'allowunstable'
+allowdivergenceopt = b'allowdivergence'
 exchangeopt = b'exchange'
 
 
@@ -141,10 +145,13 @@
 
     createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
     unstablevalue = _getoptionvalue(repo, allowunstableopt)
+    divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
     exchangevalue = _getoptionvalue(repo, exchangeopt)
 
     # createmarkers must be enabled if other options are enabled
-    if (unstablevalue or exchangevalue) and not createmarkersvalue:
+    if (
+        unstablevalue or divergencevalue or exchangevalue
+    ) and not createmarkersvalue:
         raise error.Abort(
             _(
                 b"'createmarkers' obsolete option must be enabled "
@@ -155,6 +162,7 @@
     return {
         createmarkersopt: createmarkersvalue,
         allowunstableopt: unstablevalue,
+        allowdivergenceopt: divergencevalue,
         exchangeopt: exchangevalue,
     }
 
@@ -526,14 +534,14 @@
                 children.setdefault(p, set()).add(mark)
 
 
-def _checkinvalidmarkers(markers):
+def _checkinvalidmarkers(repo, markers):
     """search for marker with invalid data and raise error if needed
 
     Exists as a separate function to allow more subtle handling by the
     evolve extension.
     """
     for mark in markers:
-        if nullid in mark[1]:
+        if repo.nullid in mark[1]:
             raise error.Abort(
                 _(
                     b'bad obsolescence marker detected: '
@@ -727,7 +735,7 @@
             return []
         self._version, markers = _readmarkers(data)
         markers = list(markers)
-        _checkinvalidmarkers(markers)
+        _checkinvalidmarkers(self.repo, markers)
         return markers
 
     @propertycache
@@ -761,7 +769,7 @@
             _addpredecessors(self.predecessors, markers)
         if self._cached('children'):
             _addchildren(self.children, markers)
-        _checkinvalidmarkers(markers)
+        _checkinvalidmarkers(self.repo, markers)
 
     def relevantmarkers(self, nodes):
         """return a set of all obsolescence markers relevant to a set of nodes.
--- a/mercurial/patch.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/patch.py	Mon May 17 15:05:24 2021 +0200
@@ -20,7 +20,7 @@
 from .i18n import _
 from .node import (
     hex,
-    nullhex,
+    sha1nodeconstants,
     short,
 )
 from .pycompat import open
@@ -3100,8 +3100,8 @@
 
     ctx1, fctx1, path1, flag1, content1, date1 = data1
     ctx2, fctx2, path2, flag2, content2, date2 = data2
-    index1 = _gitindex(content1) if path1 in ctx1 else nullhex
-    index2 = _gitindex(content2) if path2 in ctx2 else nullhex
+    index1 = _gitindex(content1) if path1 in ctx1 else sha1nodeconstants.nullhex
+    index2 = _gitindex(content2) if path2 in ctx2 else sha1nodeconstants.nullhex
     if binary and opts.git and not opts.nobinary:
         text = mdiff.b85diff(content1, content2)
         if text:
--- a/mercurial/phases.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/phases.py	Mon May 17 15:05:24 2021 +0200
@@ -109,7 +109,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
     wdirrev,
@@ -862,7 +861,7 @@
         node = bin(nhex)
         phase = int(phase)
         if phase == public:
-            if node != nullid:
+            if node != repo.nullid:
                 repo.ui.warn(
                     _(
                         b'ignoring inconsistent public root'
@@ -919,10 +918,10 @@
     rev = cl.index.get_rev
     if not roots:
         return heads
-    if not heads or heads == [nullid]:
+    if not heads or heads == [repo.nullid]:
         return []
     # The logic operated on revisions, convert arguments early for convenience
-    new_heads = {rev(n) for n in heads if n != nullid}
+    new_heads = {rev(n) for n in heads if n != repo.nullid}
     roots = [rev(n) for n in roots]
     # compute the area we need to remove
     affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
--- a/mercurial/policy.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/policy.py	Mon May 17 15:05:24 2021 +0200
@@ -80,7 +80,7 @@
     ('cext', 'bdiff'): 3,
     ('cext', 'mpatch'): 1,
     ('cext', 'osutil'): 4,
-    ('cext', 'parsers'): 17,
+    ('cext', 'parsers'): 18,
 }
 
 # map import request to other package or module
--- a/mercurial/pure/parsers.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/pure/parsers.py	Mon May 17 15:05:24 2021 +0200
@@ -10,7 +10,10 @@
 import struct
 import zlib
 
-from ..node import nullid, nullrev
+from ..node import (
+    nullrev,
+    sha1nodeconstants,
+)
 from .. import (
     pycompat,
     util,
@@ -50,7 +53,7 @@
     # Size of a C long int, platform independent
     int_size = struct.calcsize(b'>i')
     # An empty index entry, used as a default value to be overridden, or nullrev
-    null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+    null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)
 
     @util.propertycache
     def entry_size(self):
@@ -64,7 +67,7 @@
 
     @util.propertycache
     def _nodemap(self):
-        nodemap = nodemaputil.NodeMap({nullid: nullrev})
+        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
         for r in range(0, len(self)):
             n = self[r][7]
             nodemap[n] = r
@@ -101,9 +104,14 @@
     def append(self, tup):
         if '_nodemap' in vars(self):
             self._nodemap[tup[7]] = len(self)
-        data = self.index_format.pack(*tup)
+        data = self._pack_entry(tup)
         self._extra.append(data)
 
+    def _pack_entry(self, entry):
+        assert entry[8] == 0
+        assert entry[9] == 0
+        return self.index_format.pack(*entry[:8])
+
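(In other words, and only as an illustration: in-memory index entries are now
10-tuples, whose two trailing sidedata fields must stay zero for revlog v1,
mirroring the ``null_item`` default above.)

    from mercurial.node import sha1nodeconstants

    # (offset+flags, compressed length, uncompressed length, delta base,
    #  link rev, p1 rev, p2 rev, node, sidedata offset, sidedata length)
    NULL_ENTRY = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)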
     def _check_index(self, i):
         if not isinstance(i, int):
             raise TypeError(b"expecting int indexes")
@@ -119,15 +127,37 @@
         else:
             index = self._calculate_index(i)
             data = self._data[index : index + self.entry_size]
-        r = self.index_format.unpack(data)
+        r = self._unpack_entry(data)
         if self._lgt and i == 0:
             r = (offset_type(0, gettype(r[0])),) + r[1:]
         return r
 
+    def _unpack_entry(self, data):
+        r = self.index_format.unpack(data)
+        r = r + (0, 0)
+        return r
+
+    def pack_header(self, header):
+        """pack header information as binary"""
+        v_fmt = revlog_constants.INDEX_HEADER
+        return v_fmt.pack(header)
+
+    def entry_binary(self, rev):
+        """return the raw binary string representing a revision"""
+        entry = self[rev]
+        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
+        if rev == 0:
+            p = p[revlog_constants.INDEX_HEADER.size :]
+        return p
+
 
 class IndexObject(BaseIndexObject):
     def __init__(self, data):
-        assert len(data) % self.entry_size == 0
+        assert len(data) % self.entry_size == 0, (
+            len(data),
+            self.entry_size,
+            len(data) % self.entry_size,
+        )
         self._data = data
         self._lgt = len(data) // self.entry_size
         self._extra = []
@@ -246,9 +276,10 @@
 
 class Index2Mixin(object):
     index_format = revlog_constants.INDEX_ENTRY_V2
-    null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
 
-    def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
+    def replace_sidedata_info(
+        self, i, sidedata_offset, sidedata_length, offset_flags
+    ):
         """
         Replace an existing index entry's sidedata offset and length with new
         ones.
@@ -263,12 +294,27 @@
         if i >= self._lgt:
             packed = _pack(sidedata_format, sidedata_offset, sidedata_length)
             old = self._extra[i - self._lgt]
-            new = old[:64] + packed + old[64 + packed_size :]
+            offset_flags = struct.pack(b">Q", offset_flags)
+            new = offset_flags + old[8:64] + packed + old[64 + packed_size :]
             self._extra[i - self._lgt] = new
         else:
             msg = b"cannot rewrite entries outside of this transaction"
             raise KeyError(msg)
 
+    def _unpack_entry(self, data):
+        return self.index_format.unpack(data)
+
+    def _pack_entry(self, entry):
+        return self.index_format.pack(*entry)
+
+    def entry_binary(self, rev):
+        """return the raw binary string representing a revision"""
+        entry = self[rev]
+        p = revlog_constants.INDEX_ENTRY_V2.pack(*entry)
+        if rev == 0:
+            p = p[revlog_constants.INDEX_HEADER.size :]
+        return p
+
 
 class IndexObject2(Index2Mixin, IndexObject):
     pass
--- a/mercurial/repoview.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/repoview.py	Mon May 17 15:05:24 2021 +0200
@@ -333,7 +333,7 @@
         r = super(filteredchangelogmixin, self).rev(node)
         if r in self.filteredrevs:
             raise error.FilteredLookupError(
-                hex(node), self.indexfile, _(b'filtered node')
+                hex(node), self.display_id, _(b'filtered node')
             )
         return r
 
--- a/mercurial/requirements.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/requirements.py	Mon May 17 15:05:24 2021 +0200
@@ -41,10 +41,6 @@
 # This is why once a repository has enabled sparse-read, it becomes required.
 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
 
-# A repository with the sidedataflag requirement will allow to store extra
-# information for revision without altering their original hashes.
-SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
-
 # A repository with the the copies-sidedata-changeset requirement will store
 # copies related information in changeset's sidedata.
 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
--- a/mercurial/revlog.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/revlog.py	Mon May 17 15:05:24 2021 +0200
@@ -26,24 +26,18 @@
 from .node import (
     bin,
     hex,
-    nullhex,
-    nullid,
     nullrev,
     sha1nodeconstants,
     short,
-    wdirfilenodeids,
-    wdirhex,
-    wdirid,
     wdirrev,
 )
 from .i18n import _
 from .pycompat import getattr
 from .revlogutils.constants import (
+    ALL_KINDS,
+    FEATURES_BY_VERSION,
     FLAG_GENERALDELTA,
     FLAG_INLINE_DATA,
-    INDEX_ENTRY_V0,
-    INDEX_ENTRY_V1,
-    INDEX_ENTRY_V2,
     INDEX_HEADER,
     REVLOGV0,
     REVLOGV1,
@@ -53,6 +47,7 @@
     REVLOG_DEFAULT_FLAGS,
     REVLOG_DEFAULT_FORMAT,
     REVLOG_DEFAULT_VERSION,
+    SUPPORTED_FLAGS,
 )
 from .revlogutils.flagutil import (
     REVIDX_DEFAULT_FLAGS,
@@ -62,7 +57,6 @@
     REVIDX_HASCOPIESINFO,
     REVIDX_ISCENSORED,
     REVIDX_RAWTEXT_CHANGING_FLAGS,
-    REVIDX_SIDEDATA,
 )
 from .thirdparty import attr
 from . import (
@@ -83,6 +77,7 @@
     deltas as deltautil,
     flagutil,
     nodemap as nodemaputil,
+    revlogv0,
     sidedata as sidedatautil,
 )
 from .utils import (
@@ -92,6 +87,7 @@
 
 # blanked usage of all the name to prevent pyflakes constraints
 # We need these name available in the module for extensions.
+
 REVLOGV0
 REVLOGV1
 REVLOGV2
@@ -104,7 +100,6 @@
 REVLOGV2_FLAGS
 REVIDX_ISCENSORED
 REVIDX_ELLIPSIS
-REVIDX_SIDEDATA
 REVIDX_HASCOPIESINFO
 REVIDX_EXTSTORED
 REVIDX_DEFAULT_FLAGS
@@ -143,14 +138,6 @@
 )
 
 
-def getoffset(q):
-    return int(q >> 16)
-
-
-def gettype(q):
-    return int(q & 0xFFFF)
-
-
 def offset_type(offset, type):
     if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
         raise ValueError(b'unknown revlog index flags')
@@ -210,6 +197,7 @@
     revision = attr.ib()
     delta = attr.ib()
     sidedata = attr.ib()
+    protocol_flags = attr.ib()
     linknode = attr.ib(default=None)
 
 
@@ -221,110 +209,32 @@
     node = attr.ib(default=None)
 
 
-class revlogoldindex(list):
-    entry_size = INDEX_ENTRY_V0.size
-
-    @property
-    def nodemap(self):
-        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
-        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
-        return self._nodemap
-
-    @util.propertycache
-    def _nodemap(self):
-        nodemap = nodemaputil.NodeMap({nullid: nullrev})
-        for r in range(0, len(self)):
-            n = self[r][7]
-            nodemap[n] = r
-        return nodemap
-
-    def has_node(self, node):
-        """return True if the node exist in the index"""
-        return node in self._nodemap
-
-    def rev(self, node):
-        """return a revision for a node
-
-        If the node is unknown, raise a RevlogError"""
-        return self._nodemap[node]
-
-    def get_rev(self, node):
-        """return a revision for a node
-
-        If the node is unknown, return None"""
-        return self._nodemap.get(node)
-
-    def append(self, tup):
-        self._nodemap[tup[7]] = len(self)
-        super(revlogoldindex, self).append(tup)
-
-    def __delitem__(self, i):
-        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
-            raise ValueError(b"deleting slices only supports a:-1 with step 1")
-        for r in pycompat.xrange(i.start, len(self)):
-            del self._nodemap[self[r][7]]
-        super(revlogoldindex, self).__delitem__(i)
-
-    def clearcaches(self):
-        self.__dict__.pop('_nodemap', None)
-
-    def __getitem__(self, i):
-        if i == -1:
-            return (0, 0, 0, -1, -1, -1, -1, nullid)
-        return list.__getitem__(self, i)
-
-
-class revlogoldio(object):
-    def parseindex(self, data, inline):
-        s = INDEX_ENTRY_V0.size
-        index = []
-        nodemap = nodemaputil.NodeMap({nullid: nullrev})
-        n = off = 0
-        l = len(data)
-        while off + s <= l:
-            cur = data[off : off + s]
-            off += s
-            e = INDEX_ENTRY_V0.unpack(cur)
-            # transform to revlogv1 format
-            e2 = (
-                offset_type(e[0], 0),
-                e[1],
-                -1,
-                e[2],
-                e[3],
-                nodemap.get(e[4], nullrev),
-                nodemap.get(e[5], nullrev),
-                e[6],
-            )
-            index.append(e2)
-            nodemap[e[6]] = n
-            n += 1
-
-        index = revlogoldindex(index)
-        return index, None
-
-    def packentry(self, entry, node, version, rev):
-        """return the binary representation of an entry
-
-        entry:   a tuple containing all the values (see index.__getitem__)
-        node:    a callback to convert a revision to nodeid
-        version: the changelog version
-        rev:     the revision number
-        """
-        if gettype(entry[0]):
-            raise error.RevlogError(
-                _(b'index entry flags need revlog version 1')
-            )
-        e2 = (
-            getoffset(entry[0]),
-            entry[1],
-            entry[3],
-            entry[4],
-            node(entry[5]),
-            node(entry[6]),
-            entry[7],
-        )
-        return INDEX_ENTRY_V0.pack(*e2)
+def parse_index_v1(data, inline):
+    # call the C implementation to parse the index data
+    index, cache = parsers.parse_index2(data, inline)
+    return index, cache
+
+
+def parse_index_v2(data, inline):
+    # call the C implementation to parse the index data
+    index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+    return index, cache
+
+
+if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+
+    def parse_index_v1_nodemap(data, inline):
+        index, cache = parsers.parse_index_devel_nodemap(data, inline)
+        return index, cache
+
+
+else:
+    parse_index_v1_nodemap = None
+
+
+def parse_index_v1_mixed(data, inline):
+    index, cache = parse_index_v1(data, inline)
+    return rustrevlog.MixedIndex(index), cache
 
 
 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
@@ -332,52 +242,6 @@
 _maxentrysize = 0x7FFFFFFF
 
 
-class revlogio(object):
-    def parseindex(self, data, inline):
-        # call the C implementation to parse the index data
-        index, cache = parsers.parse_index2(data, inline)
-        return index, cache
-
-    def packentry(self, entry, node, version, rev):
-        p = INDEX_ENTRY_V1.pack(*entry)
-        if rev == 0:
-            p = INDEX_HEADER.pack(version) + p[4:]
-        return p
-
-
-class revlogv2io(object):
-    def parseindex(self, data, inline):
-        index, cache = parsers.parse_index2(data, inline, revlogv2=True)
-        return index, cache
-
-    def packentry(self, entry, node, version, rev):
-        p = INDEX_ENTRY_V2.pack(*entry)
-        if rev == 0:
-            p = INDEX_HEADER.pack(version) + p[4:]
-        return p
-
-
-NodemapRevlogIO = None
-
-if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
-
-    class NodemapRevlogIO(revlogio):
-        """A debug oriented IO class that return a PersistentNodeMapIndexObject
-
-        The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
-        """
-
-        def parseindex(self, data, inline):
-            index, cache = parsers.parse_index_devel_nodemap(data, inline)
-            return index, cache
-
-
-class rustrevlogio(revlogio):
-    def parseindex(self, data, inline):
-        index, cache = super(rustrevlogio, self).parseindex(data, inline)
-        return rustrevlog.MixedIndex(index), cache
-
-
 class revlog(object):
     """
     the underlying revision storage object
@@ -426,8 +290,9 @@
     def __init__(
         self,
         opener,
-        indexfile,
-        datafile=None,
+        target,
+        radix,
+        postfix=None,
         checkambig=False,
         mmaplargeindex=False,
         censorable=False,
@@ -441,17 +306,28 @@
         opener is a function that abstracts the file opening operation
         and can be used to implement COW semantics or the like.
 
+        `target`: a (KIND, ID) tuple that identifies the content stored in
+        this revlog. It helps the rest of the code to understand what the
+        revlog is about without having to resort to heuristics and index
+        filename analysis. Note that this must be reliably set by normal
+        code, but that test, debug, or performance measurement code might
+        not set it to an accurate value.
         """
         self.upperboundcomp = upperboundcomp
-        self.indexfile = indexfile
-        self.datafile = datafile or (indexfile[:-2] + b".d")
-        self.nodemap_file = None
+
+        self.radix = radix
+
+        self._indexfile = None
+        self._datafile = None
+        self._nodemap_file = None
+        self.postfix = postfix
+        self.opener = opener
         if persistentnodemap:
-            self.nodemap_file = nodemaputil.get_nodemap_file(
-                opener, self.indexfile
-            )
-
-        self.opener = opener
+            self._nodemap_file = nodemaputil.get_nodemap_file(self)
+
+        assert target[0] in ALL_KINDS
+        assert len(target) == 2
+        self.target = target
         #  When True, indexfile is opened with checkambig=True at writing, to
         #  avoid file stat ambiguity.
         self._checkambig = checkambig
@@ -477,6 +353,7 @@
         self._maxdeltachainspan = -1
         self._withsparseread = False
         self._sparserevlog = False
+        self.hassidedata = False
         self._srdensitythreshold = 0.50
         self._srmingapsize = 262144
 
@@ -486,25 +363,42 @@
 
         # 2-tuple of file handles being used for active writing.
         self._writinghandles = None
+        # prevent nesting of addgroup
+        self._adding_group = None
 
         self._loadindex()
 
         self._concurrencychecker = concurrencychecker
 
-    def _loadindex(self):
+    def _init_opts(self):
+        """process options (from above/config) to setup associated default revlog mode
+
+        These values might be affected when actually reading on disk information.
+
+        The relevant values are returned for use in _loadindex().
+
+        * newversionflags:
+            version header to use if we need to create a new revlog
+
+        * mmapindexthreshold:
+            minimal index size for start to use mmap
+
+        * force_nodemap:
+            force the usage of a "development" version of the nodemap code
+        """
         mmapindexthreshold = None
         opts = self.opener.options
 
         if b'revlogv2' in opts:
-            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
+            new_header = REVLOGV2 | FLAG_INLINE_DATA
         elif b'revlogv1' in opts:
-            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
+            new_header = REVLOGV1 | FLAG_INLINE_DATA
             if b'generaldelta' in opts:
-                newversionflags |= FLAG_GENERALDELTA
+                new_header |= FLAG_GENERALDELTA
         elif b'revlogv0' in self.opener.options:
-            newversionflags = REVLOGV0
+            new_header = REVLOGV0
         else:
-            newversionflags = REVLOG_DEFAULT_VERSION
+            new_header = REVLOG_DEFAULT_VERSION
 
         if b'chunkcachesize' in opts:
             self._chunkcachesize = opts[b'chunkcachesize']
@@ -526,7 +420,6 @@
             self._maxdeltachainspan = opts[b'maxdeltachainspan']
         if self._mmaplargeindex and b'mmapindexthreshold' in opts:
             mmapindexthreshold = opts[b'mmapindexthreshold']
-        self.hassidedata = bool(opts.get(b'side-data', False))
         self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
         withsparseread = bool(opts.get(b'with-sparse-read', False))
         # sparse-revlog forces sparse-read
@@ -554,75 +447,71 @@
                 _(b'revlog chunk cache size %r is not a power of 2')
                 % self._chunkcachesize
             )
-
-        indexdata = b''
-        self._initempty = True
+        force_nodemap = opts.get(b'devel-force-nodemap', False)
+        return new_header, mmapindexthreshold, force_nodemap
+
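
The `new_header` value returned here is a single 32-bit big-endian word: the low 16 bits carry the format version and the upper bits carry feature flags, which is why `_loadindex` below splits it with `& 0xFFFF` and `& ~0xFFFF`. A small worked example (flag bit positions as defined in revlogutils/constants.py):

    import struct

    REVLOGV1 = 1
    FLAG_INLINE_DATA = 1 << 16
    FLAG_GENERALDELTA = 1 << 17

    # what _init_opts builds for a 'revlogv1' + 'generaldelta' repository
    new_header = REVLOGV1 | FLAG_INLINE_DATA | FLAG_GENERALDELTA

    # _loadindex splits it back apart
    format_flags = new_header & ~0xFFFF
    format_version = new_header & 0xFFFF
    assert format_version == REVLOGV1
    assert format_flags == FLAG_INLINE_DATA | FLAG_GENERALDELTA

    # INDEX_HEADER (">I") packs it into the first 4 bytes of the index file
    packed = struct.pack(b'>I', new_header)
    assert struct.unpack(b'>I', packed)[0] == new_header
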
+    def _get_data(self, filepath, mmap_threshold):
+        """return a file content with or without mmap
+
+        If the file is missing return the empty string"""
         try:
-            with self._indexfp() as f:
-                if (
-                    mmapindexthreshold is not None
-                    and self.opener.fstat(f).st_size >= mmapindexthreshold
-                ):
-                    # TODO: should .close() to release resources without
-                    # relying on Python GC
-                    indexdata = util.buffer(util.mmapread(f))
-                else:
-                    indexdata = f.read()
-            if len(indexdata) > 0:
-                versionflags = INDEX_HEADER.unpack(indexdata[:4])[0]
-                self._initempty = False
-            else:
-                versionflags = newversionflags
+            with self.opener(filepath) as fp:
+                if mmap_threshold is not None:
+                    file_size = self.opener.fstat(fp).st_size
+                    if file_size >= mmap_threshold:
+                        # TODO: should .close() to release resources without
+                        # relying on Python GC
+                        return util.buffer(util.mmapread(fp))
+                return fp.read()
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
-
-            versionflags = newversionflags
-
-        self.version = versionflags
-
-        flags = versionflags & ~0xFFFF
-        fmt = versionflags & 0xFFFF
-
-        if fmt == REVLOGV0:
-            if flags:
-                raise error.RevlogError(
-                    _(b'unknown flags (%#04x) in version %d revlog %s')
-                    % (flags >> 16, fmt, self.indexfile)
-                )
-
-            self._inline = False
-            self._generaldelta = False
-
-        elif fmt == REVLOGV1:
-            if flags & ~REVLOGV1_FLAGS:
-                raise error.RevlogError(
-                    _(b'unknown flags (%#04x) in version %d revlog %s')
-                    % (flags >> 16, fmt, self.indexfile)
-                )
-
-            self._inline = versionflags & FLAG_INLINE_DATA
-            self._generaldelta = versionflags & FLAG_GENERALDELTA
-
-        elif fmt == REVLOGV2:
-            if flags & ~REVLOGV2_FLAGS:
-                raise error.RevlogError(
-                    _(b'unknown flags (%#04x) in version %d revlog %s')
-                    % (flags >> 16, fmt, self.indexfile)
-                )
-
-            # There is a bug in the transaction handling when going from an
-            # inline revlog to a separate index and data file. Turn it off until
-            # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
-            # See issue6485
-            self._inline = False
-            # generaldelta implied by version 2 revlogs.
-            self._generaldelta = True
-
+            return b''
+
+    def _loadindex(self):
+
+        new_header, mmapindexthreshold, force_nodemap = self._init_opts()
+
+        if self.postfix is None:
+            entry_point = b'%s.i' % self.radix
+        else:
+            entry_point = b'%s.i.%s' % (self.radix, self.postfix)
+
+        entry_data = b''
+        self._initempty = True
+        entry_data = self._get_data(entry_point, mmapindexthreshold)
+        if len(entry_data) > 0:
+            header = INDEX_HEADER.unpack(entry_data[:4])[0]
+            self._initempty = False
         else:
-            raise error.RevlogError(
-                _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
-            )
+            header = new_header
+
+        self._format_flags = header & ~0xFFFF
+        self._format_version = header & 0xFFFF
+
+        supported_flags = SUPPORTED_FLAGS.get(self._format_version)
+        if supported_flags is None:
+            msg = _(b'unknown version (%d) in revlog %s')
+            msg %= (self._format_version, self.display_id)
+            raise error.RevlogError(msg)
+        elif self._format_flags & ~supported_flags:
+            msg = _(b'unknown flags (%#04x) in version %d revlog %s')
+            display_flag = self._format_flags >> 16
+            msg %= (display_flag, self._format_version, self.display_id)
+            raise error.RevlogError(msg)
+
+        features = FEATURES_BY_VERSION[self._format_version]
+        self._inline = features[b'inline'](self._format_flags)
+        self._generaldelta = features[b'generaldelta'](self._format_flags)
+        self.hassidedata = features[b'sidedata']
+
+        index_data = entry_data
+        self._indexfile = entry_point
+
+        if self.postfix is None or self.postfix == b'a':
+            self._datafile = b'%s.d' % self.radix
+        else:
+            self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
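
The radix/postfix pair thus fully determines the on-disk names that the removed `indexfile`/`datafile` arguments used to spell out. A sketch of the derivation implemented just above (the `b'a'` case presumably covers the transaction-pending suffix, as in `00changelog.i.a`):

    def revlog_filenames(radix, postfix=None):
        # index ("entry point") name, mirroring _loadindex
        if postfix is None:
            index = b'%s.i' % radix
        else:
            index = b'%s.i.%s' % (radix, postfix)
        # data file name; postfix b'a' still maps to the canonical data file
        if postfix is None or postfix == b'a':
            data = b'%s.d' % radix
        else:
            data = b'%s.d.%s' % (radix, postfix)
        return index, data

    assert revlog_filenames(b'data/foo.txt') == (
        b'data/foo.txt.i',
        b'data/foo.txt.d',
    )
    assert revlog_filenames(b'00changelog', b'a') == (
        b'00changelog.i.a',
        b'00changelog.d',
    )
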
 
         self.nodeconstants = sha1nodeconstants
         self.nullid = self.nodeconstants.nullid
@@ -634,33 +523,33 @@
         self._storedeltachains = True
 
         devel_nodemap = (
-            self.nodemap_file
-            and opts.get(b'devel-force-nodemap', False)
-            and NodemapRevlogIO is not None
+            self._nodemap_file
+            and force_nodemap
+            and parse_index_v1_nodemap is not None
         )
 
         use_rust_index = False
         if rustrevlog is not None:
-            if self.nodemap_file is not None:
+            if self._nodemap_file is not None:
                 use_rust_index = True
             else:
                 use_rust_index = self.opener.options.get(b'rust.index')
 
-        self._io = revlogio()
-        if self.version == REVLOGV0:
-            self._io = revlogoldio()
-        elif fmt == REVLOGV2:
-            self._io = revlogv2io()
+        self._parse_index = parse_index_v1
+        if self._format_version == REVLOGV0:
+            self._parse_index = revlogv0.parse_index_v0
+        elif self._format_version == REVLOGV2:
+            self._parse_index = parse_index_v2
         elif devel_nodemap:
-            self._io = NodemapRevlogIO()
+            self._parse_index = parse_index_v1_nodemap
         elif use_rust_index:
-            self._io = rustrevlogio()
+            self._parse_index = parse_index_v1_mixed
         try:
-            d = self._io.parseindex(indexdata, self._inline)
+            d = self._parse_index(index_data, self._inline)
             index, _chunkcache = d
             use_nodemap = (
                 not self._inline
-                and self.nodemap_file is not None
+                and self._nodemap_file is not None
                 and util.safehasattr(index, 'update_nodemap_data')
             )
             if use_nodemap:
@@ -676,7 +565,7 @@
                         index.update_nodemap_data(*nodemap_data)
         except (ValueError, IndexError):
             raise error.RevlogError(
-                _(b"index %s is corrupted") % self.indexfile
+                _(b"index %s is corrupted") % self.display_id
             )
         self.index, self._chunkcache = d
         if not self._chunkcache:
@@ -687,22 +576,52 @@
         self._decompressors = {}
 
     @util.propertycache
+    def revlog_kind(self):
+        return self.target[0]
+
+    @util.propertycache
+    def display_id(self):
+        """The public facing "ID" of the revlog that we use in message"""
+        # Maybe we should build a user facing representation of
+        # revlog.target instead of using `self.radix`
+        return self.radix
+
+    @util.propertycache
     def _compressor(self):
         engine = util.compengines[self._compengine]
         return engine.revlogcompressor(self._compengineopts)
 
-    def _indexfp(self, mode=b'r'):
+    def _indexfp(self):
         """file object for the revlog's index file"""
-        args = {'mode': mode}
-        if mode != b'r':
-            args['checkambig'] = self._checkambig
-        if mode == b'w':
-            args['atomictemp'] = True
-        return self.opener(self.indexfile, **args)
+        return self.opener(self._indexfile, mode=b"r")
+
+    def __index_write_fp(self):
+        # You should not use this directly; use `_writing` instead
+        try:
+            f = self.opener(
+                self._indexfile, mode=b"r+", checkambig=self._checkambig
+            )
+            f.seek(0, os.SEEK_END)
+            return f
+        except IOError as inst:
+            if inst.errno != errno.ENOENT:
+                raise
+            return self.opener(
+                self._indexfile, mode=b"w+", checkambig=self._checkambig
+            )
+
+    def __index_new_fp(self):
+        # You should not use this unless you are upgrading from an inline revlog
+        return self.opener(
+            self._indexfile,
+            mode=b"w",
+            checkambig=self._checkambig,
+            atomictemp=True,
+        )
 
     def _datafp(self, mode=b'r'):
         """file object for the revlog's data file"""
-        return self.opener(self.datafile, mode=mode)
+        return self.opener(self._datafile, mode=mode)
 
     @contextlib.contextmanager
     def _datareadfp(self, existingfp=None):
@@ -785,7 +704,7 @@
         return True
 
     def update_caches(self, transaction):
-        if self.nodemap_file is not None:
+        if self._nodemap_file is not None:
             if transaction is None:
                 nodemaputil.update_persistent_nodemap(self)
             else:
@@ -802,7 +721,7 @@
         # end up having to refresh it here.
         use_nodemap = (
             not self._inline
-            and self.nodemap_file is not None
+            and self._nodemap_file is not None
             and util.safehasattr(self.index, 'update_nodemap_data')
         )
         if use_nodemap:
@@ -818,9 +737,12 @@
             raise
         except error.RevlogError:
             # parsers.c radix tree lookup failed
-            if node == wdirid or node in wdirfilenodeids:
+            if (
+                node == self.nodeconstants.wdirid
+                or node in self.nodeconstants.wdirfilenodeids
+            ):
                 raise error.WdirUnsupported
-            raise error.LookupError(node, self.indexfile, _(b'no node'))
+            raise error.LookupError(node, self.display_id, _(b'no node'))
 
     # Accessors for index entries.
 
@@ -836,7 +758,7 @@
         return self.index[rev][1]
 
     def sidedata_length(self, rev):
-        if self.version & 0xFFFF != REVLOGV2:
+        if not self.hassidedata:
             return 0
         return self.index[rev][9]
 
@@ -909,7 +831,7 @@
         i = self.index
         d = i[self.rev(node)]
         # inline node() to avoid function call overhead
-        if d[5] == nullid:
+        if d[5] == self.nullid:
             return i[d[6]][7], i[d[5]][7]
         else:
             return i[d[5]][7], i[d[6]][7]
@@ -1027,7 +949,7 @@
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
         if common is None:
-            common = [nullid]
+            common = [self.nullid]
         if heads is None:
             heads = self.heads()
 
@@ -1133,7 +1055,7 @@
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
         if common is None:
-            common = [nullid]
+            common = [self.nullid]
         if heads is None:
             heads = self.heads()
 
@@ -1171,11 +1093,15 @@
                 return nonodes
             lowestrev = min([self.rev(n) for n in roots])
         else:
-            roots = [nullid]  # Everybody's a descendant of nullid
+            roots = [self.nullid]  # Everybody's a descendant of nullid
             lowestrev = nullrev
         if (lowestrev == nullrev) and (heads is None):
             # We want _all_ the nodes!
-            return ([self.node(r) for r in self], [nullid], list(self.heads()))
+            return (
+                [self.node(r) for r in self],
+                [self.nullid],
+                list(self.heads()),
+            )
         if heads is None:
             # All nodes are ancestors, so the latest ancestor is the last
             # node.
@@ -1201,7 +1127,7 @@
                 # grab a node to tag
                 n = nodestotag.pop()
                 # Never tag nullid
-                if n == nullid:
+                if n == self.nullid:
                     continue
                 # A node's revision number represents its place in a
                 # topologically sorted list of nodes.
@@ -1213,7 +1139,7 @@
                         ancestors.add(n)  # Mark as ancestor
                         # Add non-nullid parents to list of nodes to tag.
                         nodestotag.update(
-                            [p for p in self.parents(n) if p != nullid]
+                            [p for p in self.parents(n) if p != self.nullid]
                         )
                     elif n in heads:  # We've seen it before, is it a fake head?
                         # So it is, real heads should not be the ancestors of
@@ -1241,7 +1167,7 @@
                 # We are descending from nullid, and don't need to care about
                 # any other roots.
                 lowestrev = nullrev
-                roots = [nullid]
+                roots = [self.nullid]
         # Transform our roots list into a set.
         descendants = set(roots)
         # Also, keep the original roots so we can filter out roots that aren't
@@ -1335,7 +1261,7 @@
         """
         if start is None and stop is None:
             if not len(self):
-                return [nullid]
+                return [self.nullid]
             return [self.node(r) for r in self.headrevs()]
 
         if start is None:
@@ -1425,13 +1351,13 @@
         if ancs:
             # choose a consistent winner when there's a tie
             return min(map(self.node, ancs))
-        return nullid
+        return self.nullid
 
     def _match(self, id):
         if isinstance(id, int):
             # rev
             return self.node(id)
-        if len(id) == 20:
+        if len(id) == self.nodeconstants.nodelen:
             # possibly a binary node
             # odds of a binary node being all hex in ASCII are 1 in 10**25
             try:
@@ -1452,7 +1378,7 @@
             return self.node(rev)
         except (ValueError, OverflowError):
             pass
-        if len(id) == 40:
+        if len(id) == 2 * self.nodeconstants.nodelen:
             try:
                 # a full hex nodeid?
                 node = bin(id)
@@ -1463,7 +1389,7 @@
 
     def _partialmatch(self, id):
         # we don't care wdirfilenodeids as they should be always full hash
-        maybewdir = wdirhex.startswith(id)
+        maybewdir = self.nodeconstants.wdirhex.startswith(id)
         try:
             partial = self.index.partialmatch(id)
             if partial and self.hasnode(partial):
@@ -1480,7 +1406,7 @@
             # fast path: for unfiltered changelog, radix tree is accurate
             if not getattr(self, 'filteredrevs', None):
                 raise error.AmbiguousPrefixLookupError(
-                    id, self.indexfile, _(b'ambiguous identifier')
+                    id, self.display_id, _(b'ambiguous identifier')
                 )
             # fall through to slow path that filters hidden revisions
         except (AttributeError, ValueError):
@@ -1499,14 +1425,14 @@
                 nl = [
                     n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                 ]
-                if nullhex.startswith(id):
-                    nl.append(nullid)
+                if self.nodeconstants.nullhex.startswith(id):
+                    nl.append(self.nullid)
                 if len(nl) > 0:
                     if len(nl) == 1 and not maybewdir:
                         self._pcache[id] = nl[0]
                         return nl[0]
                     raise error.AmbiguousPrefixLookupError(
-                        id, self.indexfile, _(b'ambiguous identifier')
+                        id, self.display_id, _(b'ambiguous identifier')
                     )
                 if maybewdir:
                     raise error.WdirUnsupported
@@ -1526,7 +1452,7 @@
         if n:
             return n
 
-        raise error.LookupError(id, self.indexfile, _(b'no match found'))
+        raise error.LookupError(id, self.display_id, _(b'no match found'))
 
     def shortest(self, node, minlength=1):
         """Find the shortest unambiguous prefix that matches node."""
@@ -1540,7 +1466,7 @@
                 # single 'ff...' match
                 return True
             if matchednode is None:
-                raise error.LookupError(node, self.indexfile, _(b'no node'))
+                raise error.LookupError(node, self.display_id, _(b'no node'))
             return True
 
         def maybewdir(prefix):
@@ -1560,13 +1486,15 @@
                 length = max(self.index.shortest(node), minlength)
                 return disambiguate(hexnode, length)
             except error.RevlogError:
-                if node != wdirid:
-                    raise error.LookupError(node, self.indexfile, _(b'no node'))
+                if node != self.nodeconstants.wdirid:
+                    raise error.LookupError(
+                        node, self.display_id, _(b'no node')
+                    )
             except AttributeError:
                 # Fall through to pure code
                 pass
 
-        if node == wdirid:
+        if node == self.nodeconstants.wdirid:
             for length in range(minlength, len(hexnode) + 1):
                 prefix = hexnode[:length]
                 if isvalid(prefix):
@@ -1632,9 +1560,9 @@
                         b'offset %d, got %d'
                     )
                     % (
-                        self.indexfile if self._inline else self.datafile,
+                        self._indexfile if self._inline else self._datafile,
                         length,
-                        realoffset,
+                        offset,
                         len(d) - startoffset,
                     )
                 )
@@ -1648,7 +1576,7 @@
                     b'%d, got %d'
                 )
                 % (
-                    self.indexfile if self._inline else self.datafile,
+                    self._indexfile if self._inline else self._datafile,
                     length,
                     offset,
                     len(d),
@@ -1881,14 +1809,14 @@
             rev = None
 
         # fast path the special `nullid` rev
-        if node == nullid:
+        if node == self.nullid:
             return b"", {}
 
         # ``rawtext`` is the text as stored inside the revlog. Might be the
         # revision or might need to be processed to retrieve the revision.
         rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
 
-        if self.version & 0xFFFF == REVLOGV2:
+        if self.hassidedata:
             if rev is None:
                 rev = self.rev(node)
             sidedata = self._sidedata(rev)
@@ -2019,14 +1947,14 @@
                     revornode = templatefilters.short(hex(node))
                 raise error.RevlogError(
                     _(b"integrity check failed on %s:%s")
-                    % (self.indexfile, pycompat.bytestr(revornode))
+                    % (self.display_id, pycompat.bytestr(revornode))
                 )
         except error.RevlogError:
             if self._censorable and storageutil.iscensoredtext(text):
-                raise error.CensoredNodeError(self.indexfile, node, text)
+                raise error.CensoredNodeError(self.display_id, node, text)
             raise
 
-    def _enforceinlinesize(self, tr, fp=None):
+    def _enforceinlinesize(self, tr):
         """Check if the revlog is too big for inline and convert if so.
 
         This should be called after revisions are added to the revlog. If the
@@ -2034,51 +1962,105 @@
         to use multiple index and data files.
         """
         tiprev = len(self) - 1
-        if (
-            not self._inline
-            or (self.start(tiprev) + self.length(tiprev)) < _maxinline
-        ):
+        total_size = self.start(tiprev) + self.length(tiprev)
+        if not self._inline or total_size < _maxinline:
             return
 
-        troffset = tr.findoffset(self.indexfile)
+        troffset = tr.findoffset(self._indexfile)
         if troffset is None:
             raise error.RevlogError(
-                _(b"%s not found in the transaction") % self.indexfile
+                _(b"%s not found in the transaction") % self._indexfile
             )
         trindex = 0
-        tr.add(self.datafile, 0)
-
-        if fp:
+        tr.add(self._datafile, 0)
+
+        existing_handles = False
+        if self._writinghandles is not None:
+            existing_handles = True
+            fp = self._writinghandles[0]
             fp.flush()
             fp.close()
             # We can't use the cached file handle after close(). So prevent
             # its usage.
             self._writinghandles = None
 
-        with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
-            for r in self:
-                dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
-                if troffset <= self.start(r):
-                    trindex = r
-
-        with self._indexfp(b'w') as fp:
-            self.version &= ~FLAG_INLINE_DATA
-            self._inline = False
-            io = self._io
-            for i in self:
-                e = io.packentry(self.index[i], self.node, self.version, i)
-                fp.write(e)
-
-            # the temp file replace the real index when we exit the context
-            # manager
-
-        tr.replace(self.indexfile, trindex * self.index.entry_size)
-        nodemaputil.setup_persistent_nodemap(tr, self)
-        self._chunkclear()
+        new_dfh = self._datafp(b'w+')
+        new_dfh.truncate(0)  # drop any potentially existing data
+        try:
+            with self._indexfp() as read_ifh:
+                for r in self:
+                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
+                    if troffset <= self.start(r):
+                        trindex = r
+                new_dfh.flush()
+
+            with self.__index_new_fp() as fp:
+                self._format_flags &= ~FLAG_INLINE_DATA
+                self._inline = False
+                for i in self:
+                    e = self.index.entry_binary(i)
+                    if i == 0:
+                        header = self._format_flags | self._format_version
+                        header = self.index.pack_header(header)
+                        e = header + e
+                    fp.write(e)
+                # the temp file replaces the real index when we exit the
+                # context manager
+
+            tr.replace(self._indexfile, trindex * self.index.entry_size)
+            nodemaputil.setup_persistent_nodemap(tr, self)
+            self._chunkclear()
+
+            if existing_handles:
+                # switched from inline to conventional; reopen the index
+                ifh = self.__index_write_fp()
+                self._writinghandles = (ifh, new_dfh)
+                new_dfh = None
+        finally:
+            if new_dfh is not None:
+                new_dfh.close()
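
The conversion is one-way: once the inline payload reaches `_maxinline`, revisions are copied out to a standalone `.d` file and FLAG_INLINE_DATA is cleared from the header. A sketch of the guard at the top of this method (the 131072-byte value for `_maxinline` is an assumption; the constant is defined outside this hunk):

    _maxinline = 131072  # assumed default, defined elsewhere in revlog.py

    def needs_split(inline, start_of_tip, length_of_tip):
        # offset of the last revision plus its length = total payload size
        total_size = start_of_tip + length_of_tip
        return inline and total_size >= _maxinline

    assert not needs_split(False, 10 ** 9, 100)  # already split
    assert not needs_split(True, 131000, 50)     # still small enough
    assert needs_split(True, 131000, 100)        # crosses the threshold
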
 
     def _nodeduplicatecallback(self, transaction, node):
         """called when trying to add a node already stored."""
 
+    @contextlib.contextmanager
+    def _writing(self, transaction):
+        if self._writinghandles is not None:
+            yield
+        else:
+            r = len(self)
+            dsize = 0
+            if r:
+                dsize = self.end(r - 1)
+            dfh = None
+            if not self._inline:
+                try:
+                    dfh = self._datafp(b"r+")
+                    dfh.seek(0, os.SEEK_END)
+                except IOError as inst:
+                    if inst.errno != errno.ENOENT:
+                        raise
+                    dfh = self._datafp(b"w+")
+                transaction.add(self._datafile, dsize)
+            try:
+                isize = r * self.index.entry_size
+                ifh = self.__index_write_fp()
+                if self._inline:
+                    transaction.add(self._indexfile, dsize + isize)
+                else:
+                    transaction.add(self._indexfile, isize)
+                try:
+                    self._writinghandles = (ifh, dfh)
+                    try:
+                        yield
+                    finally:
+                        self._writinghandles = None
+                finally:
+                    ifh.close()
+            finally:
+                if dfh is not None:
+                    dfh.close()
+
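
A sketch of the re-entrancy contract `_writing` provides: the outermost caller opens the handles (and registers the files with the transaction); nested uses simply yield, so code such as `addgroup` below can enter it unconditionally without tracking who opened what:

    import contextlib

    class WritingSketch(object):
        def __init__(self):
            self._writinghandles = None

        @contextlib.contextmanager
        def _writing(self, transaction):
            if self._writinghandles is not None:
                yield  # already inside a write block: reuse open handles
            else:
                self._writinghandles = ('ifh', 'dfh')  # file-object stand-ins
                try:
                    yield
                finally:
                    self._writinghandles = None

    s = WritingSketch()
    with s._writing(None):
        with s._writing(None):  # nesting is safe and cheap
            assert s._writinghandles == ('ifh', 'dfh')
    assert s._writinghandles is None
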
     def addrevision(
         self,
         text,
@@ -2108,12 +2090,12 @@
         """
         if link == nullrev:
             raise error.RevlogError(
-                _(b"attempted to add linkrev -1 to %s") % self.indexfile
+                _(b"attempted to add linkrev -1 to %s") % self.display_id
             )
 
         if sidedata is None:
             sidedata = {}
-        elif not self.hassidedata:
+        elif sidedata and not self.hassidedata:
             raise error.ProgrammingError(
                 _(b"trying to add sidedata to a revlog who don't support them")
             )
@@ -2133,7 +2115,7 @@
                 _(
                     b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                 )
-                % (self.indexfile, len(rawtext))
+                % (self.display_id, len(rawtext))
             )
 
         node = node or self.hash(rawtext, p1, p2)
@@ -2174,11 +2156,7 @@
         useful when reusing a revision not stored in this revlog (ex: received
         over wire, or read from an external bundle).
         """
-        dfh = None
-        if not self._inline:
-            dfh = self._datafp(b"a+")
-        ifh = self._indexfp(b"a+")
-        try:
+        with self._writing(transaction):
             return self._addrevision(
                 node,
                 rawtext,
@@ -2188,15 +2166,9 @@
                 p2,
                 flags,
                 cachedelta,
-                ifh,
-                dfh,
                 deltacomputer=deltacomputer,
                 sidedata=sidedata,
             )
-        finally:
-            if dfh:
-                dfh.close()
-            ifh.close()
 
     def compress(self, data):
         """Generate a possibly-compressed representation of data."""
@@ -2283,8 +2255,6 @@
         p2,
         flags,
         cachedelta,
-        ifh,
-        dfh,
         alwayscache=False,
         deltacomputer=None,
         sidedata=None,
@@ -2302,19 +2272,25 @@
         - rawtext is optional (can be None); if not set, cachedelta must be set.
           if both are set, they must correspond to each other.
         """
-        if node == nullid:
+        if node == self.nullid:
             raise error.RevlogError(
-                _(b"%s: attempt to add null revision") % self.indexfile
+                _(b"%s: attempt to add null revision") % self.display_id
             )
-        if node == wdirid or node in wdirfilenodeids:
+        if (
+            node == self.nodeconstants.wdirid
+            or node in self.nodeconstants.wdirfilenodeids
+        ):
             raise error.RevlogError(
-                _(b"%s: attempt to add wdir revision") % self.indexfile
+                _(b"%s: attempt to add wdir revision") % self.display_id
             )
+        if self._writinghandles is None:
+            msg = b'adding revision outside `revlog._writing` context'
+            raise error.ProgrammingError(msg)
 
         if self._inline:
-            fh = ifh
+            fh = self._writinghandles[0]
         else:
-            fh = dfh
+            fh = self._writinghandles[1]
 
         btext = [rawtext]
 
@@ -2324,18 +2300,19 @@
         offset = self._get_data_offset(prev)
 
         if self._concurrencychecker:
+            ifh, dfh = self._writinghandles
             if self._inline:
                 # offset is "as if" it were in the .d file, so we need to add on
                 # the size of the entry metadata.
                 self._concurrencychecker(
-                    ifh, self.indexfile, offset + curr * self.index.entry_size
+                    ifh, self._indexfile, offset + curr * self.index.entry_size
                 )
             else:
                 # Entries in the .i are a consistent size.
                 self._concurrencychecker(
-                    ifh, self.indexfile, curr * self.index.entry_size
+                    ifh, self._indexfile, curr * self.index.entry_size
                 )
-                self._concurrencychecker(dfh, self.datafile, offset)
+                self._concurrencychecker(dfh, self._datafile, offset)
 
         p1r, p2r = self.rev(p1), self.rev(p2)
 
@@ -2358,7 +2335,7 @@
 
         deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
 
-        if sidedata:
+        if sidedata and self.hassidedata:
             serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
             sidedata_offset = offset + deltainfo.deltalen
         else:
@@ -2381,15 +2358,14 @@
             len(serialized_sidedata),
         )
 
-        if self.version & 0xFFFF != REVLOGV2:
-            e = e[:8]
-
         self.index.append(e)
-        entry = self._io.packentry(e, self.node, self.version, curr)
+        entry = self.index.entry_binary(curr)
+        if curr == 0:
+            header = self._format_flags | self._format_version
+            header = self.index.pack_header(header)
+            entry = header + entry
         self._writeentry(
             transaction,
-            ifh,
-            dfh,
             entry,
             deltainfo.data,
             link,
@@ -2416,7 +2392,7 @@
         to `n - 1`'s sidedata being written after `n`'s data.
 
         TODO cache this in a docket file before getting out of experimental."""
-        if self.version & 0xFFFF != REVLOGV2:
+        if self._format_version != REVLOGV2:
             return self.end(prev)
 
         offset = 0
@@ -2427,9 +2403,7 @@
             offset = max(self.end(rev), offset, sidedata_end)
         return offset
 
-    def _writeentry(
-        self, transaction, ifh, dfh, entry, data, link, offset, sidedata
-    ):
+    def _writeentry(self, transaction, entry, data, link, offset, sidedata):
         # Files opened in a+ mode have inconsistent behavior on various
         # platforms. Windows requires that a file positioning call be made
         # when the file handle transitions between reads and writes. See
@@ -2442,14 +2416,18 @@
         # Note: This is likely not necessary on Python 3. However, because
         # the file handle is reused for reads and may be seeked there, we need
         # to be careful before changing this.
+        if self._writinghandles is None:
+            msg = b'adding revision outside `revlog._writing` context'
+            raise error.ProgrammingError(msg)
+        ifh, dfh = self._writinghandles
         ifh.seek(0, os.SEEK_END)
         if dfh:
             dfh.seek(0, os.SEEK_END)
 
         curr = len(self) - 1
         if not self._inline:
-            transaction.add(self.datafile, offset)
-            transaction.add(self.indexfile, curr * len(entry))
+            transaction.add(self._datafile, offset)
+            transaction.add(self._indexfile, curr * len(entry))
             if data[0]:
                 dfh.write(data[0])
             dfh.write(data[1])
@@ -2458,13 +2436,13 @@
             ifh.write(entry)
         else:
             offset += curr * self.index.entry_size
-            transaction.add(self.indexfile, offset)
+            transaction.add(self._indexfile, offset)
             ifh.write(entry)
             ifh.write(data[0])
             ifh.write(data[1])
             if sidedata:
                 ifh.write(sidedata)
-            self._enforceinlinesize(transaction, ifh)
+            self._enforceinlinesize(transaction)
         nodemaputil.setup_persistent_nodemap(transaction, self)
 
     def addgroup(
@@ -2487,115 +2465,93 @@
         this revlog and the node that was added.
         """
 
-        if self._writinghandles:
+        if self._adding_group:
             raise error.ProgrammingError(b'cannot nest addgroup() calls')
 
-        r = len(self)
-        end = 0
-        if r:
-            end = self.end(r - 1)
-        ifh = self._indexfp(b"a+")
-        isize = r * self.index.entry_size
-        if self._inline:
-            transaction.add(self.indexfile, end + isize)
-            dfh = None
-        else:
-            transaction.add(self.indexfile, isize)
-            transaction.add(self.datafile, end)
-            dfh = self._datafp(b"a+")
-
-        def flush():
-            if dfh:
-                dfh.flush()
-            ifh.flush()
-
-        self._writinghandles = (ifh, dfh)
+        self._adding_group = True
         empty = True
-
         try:
-            deltacomputer = deltautil.deltacomputer(self)
-            # loop through our set of deltas
-            for data in deltas:
-                node, p1, p2, linknode, deltabase, delta, flags, sidedata = data
-                link = linkmapper(linknode)
-                flags = flags or REVIDX_DEFAULT_FLAGS
-
-                rev = self.index.get_rev(node)
-                if rev is not None:
-                    # this can happen if two branches make the same change
-                    self._nodeduplicatecallback(transaction, rev)
-                    if duplicaterevisioncb:
-                        duplicaterevisioncb(self, rev)
-                    empty = False
-                    continue
-
-                for p in (p1, p2):
-                    if not self.index.has_node(p):
+            with self._writing(transaction):
+                deltacomputer = deltautil.deltacomputer(self)
+                # loop through our set of deltas
+                for data in deltas:
+                    (
+                        node,
+                        p1,
+                        p2,
+                        linknode,
+                        deltabase,
+                        delta,
+                        flags,
+                        sidedata,
+                    ) = data
+                    link = linkmapper(linknode)
+                    flags = flags or REVIDX_DEFAULT_FLAGS
+
+                    rev = self.index.get_rev(node)
+                    if rev is not None:
+                        # this can happen if two branches make the same change
+                        self._nodeduplicatecallback(transaction, rev)
+                        if duplicaterevisioncb:
+                            duplicaterevisioncb(self, rev)
+                        empty = False
+                        continue
+
+                    for p in (p1, p2):
+                        if not self.index.has_node(p):
+                            raise error.LookupError(
+                                p, self.radix, _(b'unknown parent')
+                            )
+
+                    if not self.index.has_node(deltabase):
                         raise error.LookupError(
-                            p, self.indexfile, _(b'unknown parent')
+                            deltabase, self.display_id, _(b'unknown delta base')
                         )
 
-                if not self.index.has_node(deltabase):
-                    raise error.LookupError(
-                        deltabase, self.indexfile, _(b'unknown delta base')
+                    baserev = self.rev(deltabase)
+
+                    if baserev != nullrev and self.iscensored(baserev):
+                        # if base is censored, the delta must be a full
+                        # replacement in a single patch operation
+                        hlen = struct.calcsize(b">lll")
+                        oldlen = self.rawsize(baserev)
+                        newlen = len(delta) - hlen
+                        if delta[:hlen] != mdiff.replacediffheader(
+                            oldlen, newlen
+                        ):
+                            raise error.CensoredBaseError(
+                                self.display_id, self.node(baserev)
+                            )
+
+                    if not flags and self._peek_iscensored(baserev, delta):
+                        flags |= REVIDX_ISCENSORED
+
+                    # We assume consumers of addrevisioncb will want to retrieve
+                    # the added revision, which will require a call to
+                    # revision(). revision() will fast path if there is a cache
+                    # hit. So, we tell _addrevision() to always cache in this case.
+                    # We're only using addgroup() in the context of changegroup
+                    # generation so the revision data can always be handled as raw
+                    # by the flagprocessor.
+                    rev = self._addrevision(
+                        node,
+                        None,
+                        transaction,
+                        link,
+                        p1,
+                        p2,
+                        flags,
+                        (baserev, delta),
+                        alwayscache=alwayscache,
+                        deltacomputer=deltacomputer,
+                        sidedata=sidedata,
                     )
 
-                baserev = self.rev(deltabase)
-
-                if baserev != nullrev and self.iscensored(baserev):
-                    # if base is censored, delta must be full replacement in a
-                    # single patch operation
-                    hlen = struct.calcsize(b">lll")
-                    oldlen = self.rawsize(baserev)
-                    newlen = len(delta) - hlen
-                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
-                        raise error.CensoredBaseError(
-                            self.indexfile, self.node(baserev)
-                        )
-
-                if not flags and self._peek_iscensored(baserev, delta, flush):
-                    flags |= REVIDX_ISCENSORED
-
-                # We assume consumers of addrevisioncb will want to retrieve
-                # the added revision, which will require a call to
-                # revision(). revision() will fast path if there is a cache
-                # hit. So, we tell _addrevision() to always cache in this case.
-                # We're only using addgroup() in the context of changegroup
-                # generation so the revision data can always be handled as raw
-                # by the flagprocessor.
-                rev = self._addrevision(
-                    node,
-                    None,
-                    transaction,
-                    link,
-                    p1,
-                    p2,
-                    flags,
-                    (baserev, delta),
-                    ifh,
-                    dfh,
-                    alwayscache=alwayscache,
-                    deltacomputer=deltacomputer,
-                    sidedata=sidedata,
-                )
-
-                if addrevisioncb:
-                    addrevisioncb(self, rev)
-                empty = False
-
-                if not dfh and not self._inline:
-                    # addrevision switched from inline to conventional
-                    # reopen the index
-                    ifh.close()
-                    dfh = self._datafp(b"a+")
-                    ifh = self._indexfp(b"a+")
-                    self._writinghandles = (ifh, dfh)
+                    if addrevisioncb:
+                        addrevisioncb(self, rev)
+                    empty = False
         finally:
-            self._writinghandles = None
-
-            if dfh:
-                dfh.close()
-            ifh.close()
+            self._adding_group = False
         return not empty
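
For reference, the shape of one element of `deltas` as unpacked at the top of the loop above, with placeholder values (real ones come from a changegroup), plus a trivial stand-in for `linkmapper`:

    node = b'\x11' * 20
    p1 = p2 = b'\x00' * 20    # null parents
    linknode = b'\x22' * 20   # changeset this revision belongs to
    deltabase = b'\x00' * 20  # node the delta applies against
    delta = b''               # binary delta payload
    flags = 0                 # falls back to REVIDX_DEFAULT_FLAGS
    sidedata = {}

    group_entry = (node, p1, p2, linknode, deltabase, delta, flags, sidedata)

    # linkmapper translates a linknode into a local changelog revision
    known = {linknode: 42}
    linkmapper = known.__getitem__
    assert linkmapper(group_entry[3]) == 42
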
 
     def iscensored(self, rev):
@@ -2605,7 +2561,7 @@
 
         return self.flags(rev) & REVIDX_ISCENSORED
 
-    def _peek_iscensored(self, baserev, delta, flush):
+    def _peek_iscensored(self, baserev, delta):
         """Quickly check if a delta produces a censored revision."""
         if not self._censorable:
             return False
@@ -2650,12 +2606,12 @@
         # first truncate the files on disk
         end = self.start(rev)
         if not self._inline:
-            transaction.add(self.datafile, end)
+            transaction.add(self._datafile, end)
             end = rev * self.index.entry_size
         else:
             end += rev * self.index.entry_size
 
-        transaction.add(self.indexfile, end)
+        transaction.add(self._indexfile, end)
 
         # then reset internal state in memory to forget those revisions
         self._revisioncache = None
@@ -2688,7 +2644,7 @@
             dd = 0
 
         try:
-            f = self.opener(self.indexfile)
+            f = self.opener(self._indexfile)
             f.seek(0, io.SEEK_END)
             actual = f.tell()
             f.close()
@@ -2709,9 +2665,9 @@
         return (dd, di)
 
     def files(self):
-        res = [self.indexfile]
+        res = [self._indexfile]
         if not self._inline:
-            res.append(self.datafile)
+            res.append(self._datafile)
         return res
 
     def emitrevisions(
@@ -2768,7 +2724,7 @@
         addrevisioncb=None,
         deltareuse=DELTAREUSESAMEREVS,
         forcedeltabothparents=None,
-        sidedatacompanion=None,
+        sidedata_helpers=None,
     ):
         """Copy this revlog to another, possibly with format changes.
 
@@ -2811,21 +2767,8 @@
         argument controls whether to force compute deltas against both parents
         for merges. By default, the current default is used.
 
-        If not None, the `sidedatacompanion` is callable that accept two
-        arguments:
-
-            (srcrevlog, rev)
-
-        and return a quintet that control changes to sidedata content from the
-        old revision to the new clone result:
-
-            (dropall, filterout, update, new_flags, dropped_flags)
-
-        * if `dropall` is True, all sidedata should be dropped
-        * `filterout` is a set of sidedata keys that should be dropped
-        * `update` is a mapping of additionnal/new key -> value
-        * new_flags is a bitfields of new flags that the revision should get
-        * dropped_flags is a bitfields of new flags that the revision shoudl not longer have
+        See `revlogutils.sidedata.get_sidedata_helpers` for the documentation
+        on `sidedata_helpers`.
         """
         if deltareuse not in self.DELTAREUSEALL:
             raise ValueError(
@@ -2865,7 +2808,7 @@
                 addrevisioncb,
                 deltareuse,
                 forcedeltabothparents,
-                sidedatacompanion,
+                sidedata_helpers,
             )
 
         finally:
@@ -2880,7 +2823,7 @@
         addrevisioncb,
         deltareuse,
         forcedeltabothparents,
-        sidedatacompanion,
+        sidedata_helpers,
     ):
         """perform the core duty of `revlog.clone` after parameter processing"""
         deltacomputer = deltautil.deltacomputer(destrevlog)
@@ -2896,31 +2839,18 @@
             p2 = index[entry[6]][7]
             node = entry[7]
 
-            sidedataactions = (False, [], {}, 0, 0)
-            if sidedatacompanion is not None:
-                sidedataactions = sidedatacompanion(self, rev)
-
             # (Possibly) reuse the delta from the revlog if allowed and
             # the revlog chunk is a delta.
             cachedelta = None
             rawtext = None
-            if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
-                dropall = sidedataactions[0]
-                filterout = sidedataactions[1]
-                update = sidedataactions[2]
-                new_flags = sidedataactions[3]
-                dropped_flags = sidedataactions[4]
+            if deltareuse == self.DELTAREUSEFULLADD:
                 text, sidedata = self._revisiondata(rev)
-                if dropall:
-                    sidedata = {}
-                for key in filterout:
-                    sidedata.pop(key, None)
-                sidedata.update(update)
-                if not sidedata:
-                    sidedata = None
-
-                flags |= new_flags
-                flags &= ~dropped_flags
+
+                if sidedata_helpers is not None:
+                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+                        self, sidedata_helpers, sidedata, rev
+                    )
+                    flags = flags | new_flags[0] & ~new_flags[1]
 
                 destrevlog.addrevision(
                     text,
@@ -2940,16 +2870,19 @@
                     if dp != nullrev:
                         cachedelta = (dp, bytes(self._chunk(rev)))
 
+                sidedata = None
                 if not cachedelta:
-                    rawtext = self.rawdata(rev)
-
-                ifh = destrevlog.opener(
-                    destrevlog.indexfile, b'a+', checkambig=False
-                )
-                dfh = None
-                if not destrevlog._inline:
-                    dfh = destrevlog.opener(destrevlog.datafile, b'a+')
-                try:
+                    rawtext, sidedata = self._revisiondata(rev)
+                if sidedata is None:
+                    sidedata = self.sidedata(rev)
+
+                if sidedata_helpers is not None:
+                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+                        self, sidedata_helpers, sidedata, rev
+                    )
+                    flags = flags | new_flags[0] & ~new_flags[1]
+
+                with destrevlog._writing(tr):
                     destrevlog._addrevision(
                         node,
                         rawtext,
@@ -2959,22 +2892,18 @@
                         p2,
                         flags,
                         cachedelta,
-                        ifh,
-                        dfh,
                         deltacomputer=deltacomputer,
+                        sidedata=sidedata,
                     )
-                finally:
-                    if dfh:
-                        dfh.close()
-                    ifh.close()
 
             if addrevisioncb:
                 addrevisioncb(self, rev, node)
 
     def censorrevision(self, tr, censornode, tombstone=b''):
-        if (self.version & 0xFFFF) == REVLOGV0:
+        if self._format_version == REVLOGV0:
             raise error.RevlogError(
-                _(b'cannot censor with version %d revlogs') % self.version
+                _(b'cannot censor with version %d revlogs')
+                % self._format_version
             )
 
         censorrev = self.rev(censornode)
@@ -2988,15 +2917,19 @@
         # Rewriting the revlog in place is hard. Our strategy for censoring is
         # to create a new revlog, copy all revisions to it, then replace the
         # revlogs on transaction close.
-
-        newindexfile = self.indexfile + b'.tmpcensored'
-        newdatafile = self.datafile + b'.tmpcensored'
-
+        #
         # This is a bit dangerous. We could easily have a mismatch of state.
-        newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
-        newrl.version = self.version
+        newrl = revlog(
+            self.opener,
+            target=self.target,
+            radix=self.radix,
+            postfix=b'tmpcensored',
+            censorable=True,
+        )
+        newrl._format_version = self._format_version
+        newrl._format_flags = self._format_flags
         newrl._generaldelta = self._generaldelta
-        newrl._io = self._io
+        newrl._parse_index = self._parse_index
 
         for rev in self.revs():
             node = self.node(rev)
@@ -3043,13 +2976,13 @@
                 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
             )
 
-        tr.addbackup(self.indexfile, location=b'store')
+        tr.addbackup(self._indexfile, location=b'store')
         if not self._inline:
-            tr.addbackup(self.datafile, location=b'store')
-
-        self.opener.rename(newrl.indexfile, self.indexfile)
+            tr.addbackup(self._datafile, location=b'store')
+
+        self.opener.rename(newrl._indexfile, self._indexfile)
         if not self._inline:
-            self.opener.rename(newrl.datafile, self.datafile)
+            self.opener.rename(newrl._datafile, self._datafile)
 
         self.clearcaches()
         self._loadindex()
@@ -3066,13 +2999,13 @@
         if di:
             yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
 
-        version = self.version & 0xFFFF
+        version = self._format_version
 
         # The verifier tells us what version revlog we should be.
         if version != state[b'expectedversion']:
             yield revlogproblem(
                 warning=_(b"warning: '%s' uses revlog format %d; expected %d")
-                % (self.indexfile, version, state[b'expectedversion'])
+                % (self.display_id, version, state[b'expectedversion'])
             )
 
         state[b'skipread'] = set()
@@ -3170,9 +3103,9 @@
         d = {}
 
         if exclusivefiles:
-            d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
+            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
             if not self._inline:
-                d[b'exclusivefiles'].append((self.opener, self.datafile))
+                d[b'exclusivefiles'].append((self.opener, self._datafile))
 
         if sharedfiles:
             d[b'sharedfiles'] = []
@@ -3190,8 +3123,8 @@
 
         return d
 
-    def rewrite_sidedata(self, helpers, startrev, endrev):
-        if self.version & 0xFFFF != REVLOGV2:
+    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
+        if not self.hassidedata:
             return
         # inline are not yet supported because they suffer from an issue when
         # rewriting them (since it's not an append-only operation).
@@ -3201,15 +3134,27 @@
             # Nothing to generate or remove
             return
 
+        # the changelog implements a "delayed" writing mechanism that assumes
+        # that all index data is written in append mode and is therefore
+        # incompatible with the seeked writes done in this method. The use of
+        # such "delayed" writing will soon be removed for revlog versions that
+        # support side data, so for now, we only keep this simple assert to
+        # highlight the situation.
+        delayed = getattr(self, '_delayed', False)
+        diverted = getattr(self, '_divert', False)
+        if delayed and not diverted:
+            msg = "cannot rewrite_sidedata of a delayed revlog"
+            raise error.ProgrammingError(msg)
+
         new_entries = []
         # append the new sidedata
-        with self._datafp(b'a+') as fp:
-            # Maybe this bug still exists, see revlog._writeentry
-            fp.seek(0, os.SEEK_END)
-            current_offset = fp.tell()
+        with self._writing(transaction):
+            ifh, dfh = self._writinghandles
+            dfh.seek(0, os.SEEK_END)
+            current_offset = dfh.tell()
             for rev in range(startrev, endrev + 1):
                 entry = self.index[rev]
-                new_sidedata = storageutil.run_sidedata_helpers(
+                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                     store=self,
                     sidedata_helpers=helpers,
                     sidedata={},
@@ -3223,20 +3168,29 @@
                     # rewriting entries that already have sidedata is not
                     # supported yet, because it introduces garbage data in the
                     # revlog.
-                    msg = b"Rewriting existing sidedata is not supported yet"
+                    msg = b"rewriting existing sidedata is not supported yet"
                     raise error.Abort(msg)
-                entry = entry[:8]
+
+                # Apply (potential) flags to add and to remove after running
+                # the sidedata helpers
+                new_offset_flags = entry[0] | flags[0] & ~flags[1]
+                entry = (new_offset_flags,) + entry[1:8]
                 entry += (current_offset, len(serialized_sidedata))
 
-                fp.write(serialized_sidedata)
+                # the sidedata computation might have moved the file cursors around
+                dfh.seek(current_offset, os.SEEK_SET)
+                dfh.write(serialized_sidedata)
                 new_entries.append(entry)
                 current_offset += len(serialized_sidedata)
 
-        # rewrite the new index entries
-        with self._indexfp(b'w+') as fp:
-            fp.seek(startrev * self.index.entry_size)
-            for i, entry in enumerate(new_entries):
+            # rewrite the new index entries
+            ifh.seek(startrev * self.index.entry_size)
+            for i, e in enumerate(new_entries):
                 rev = startrev + i
-                self.index.replace_sidedata_info(rev, entry[8], entry[9])
-                packed = self._io.packentry(entry, self.node, self.version, rev)
-                fp.write(packed)
+                self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
+                packed = self.index.entry_binary(rev)
+                if rev == 0:
+                    header = self._format_flags | self._format_version
+                    header = self.index.pack_header(header)
+                    packed = header + packed
+                ifh.write(packed)
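
A note on the flag arithmetic above: since `&` binds tighter than `|` in
Python, `entry[0] | flags[0] & ~flags[1]` reads as
`entry[0] | (flags[0] & ~flags[1])`, i.e. the removal mask only filters the
bits being added; flags already set on the entry are kept. A minimal
standalone sketch (FLAG_A and FLAG_B are hypothetical stand-ins, not real
revlog flags):

    # offset lives in the high bits, flags in the low 16 bits
    FLAG_A = 1 << 13
    FLAG_B = 1 << 14

    def merge_offset_flags(offset_flags, to_add, to_remove):
        # same expression as the revlog code above, parenthesized for clarity
        return offset_flags | (to_add & ~to_remove)

    entry0 = (42 << 16) | FLAG_B  # offset 42 with FLAG_B already set
    merged = merge_offset_flags(entry0, FLAG_A, FLAG_B)
    assert merged >> 16 == 42     # offset bits are untouched
    assert merged & FLAG_A        # the requested flag was added
    assert merged & FLAG_B        # pre-existing flags are not cleared
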
--- a/mercurial/revlogutils/constants.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/revlogutils/constants.py	Mon May 17 15:05:24 2021 +0200
@@ -13,6 +13,20 @@
 
 from ..interfaces import repository
 
+### Internal utility constants
+
+KIND_CHANGELOG = 1001  # over 256 so it cannot be confused with a bytes value
+KIND_MANIFESTLOG = 1002
+KIND_FILELOG = 1003
+KIND_OTHER = 1004
+
+ALL_KINDS = {
+    KIND_CHANGELOG,
+    KIND_MANIFESTLOG,
+    KIND_FILELOG,
+    KIND_OTHER,
+}
+
 ### main revlog header
 
 INDEX_HEADER = struct.Struct(b">I")
@@ -31,6 +45,7 @@
 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
 REVLOG_DEFAULT_FORMAT = REVLOGV1
 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
+REVLOGV0_FLAGS = 0
 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
 REVLOGV2_FLAGS = FLAG_INLINE_DATA
 
@@ -85,8 +100,6 @@
 REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
 # revision data is stored externally
 REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
-# revision data contains extra metadata not part of the official digest
-REVIDX_SIDEDATA = repository.REVISION_FLAG_SIDEDATA
 # revision changes files in a way that could affect copy tracing.
 REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
 REVIDX_DEFAULT_FLAGS = 0
@@ -95,13 +108,46 @@
     REVIDX_ISCENSORED,
     REVIDX_ELLIPSIS,
     REVIDX_EXTSTORED,
-    REVIDX_SIDEDATA,
     REVIDX_HASCOPIESINFO,
 ]
 
 # bitmark for flags that could cause rawdata content change
-REVIDX_RAWTEXT_CHANGING_FLAGS = (
-    REVIDX_ISCENSORED | REVIDX_EXTSTORED | REVIDX_SIDEDATA
-)
+REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
+
+SUPPORTED_FLAGS = {
+    REVLOGV0: REVLOGV0_FLAGS,
+    REVLOGV1: REVLOGV1_FLAGS,
+    REVLOGV2: REVLOGV2_FLAGS,
+}
+
+_no = lambda flags: False
+_yes = lambda flags: True
+
+
+def _from_flag(flag):
+    return lambda flags: bool(flags & flag)
+
+
+FEATURES_BY_VERSION = {
+    REVLOGV0: {
+        b'inline': _no,
+        b'generaldelta': _no,
+        b'sidedata': False,
+    },
+    REVLOGV1: {
+        b'inline': _from_flag(FLAG_INLINE_DATA),
+        b'generaldelta': _from_flag(FLAG_GENERALDELTA),
+        b'sidedata': False,
+    },
+    REVLOGV2: {
+        # There is a bug in the transaction handling when going from an
+        # inline revlog to a separate index and data file. Turn it off until
+        # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
+        # See issue6485
+        b'inline': _no,
+        b'generaldelta': _yes,
+        b'sidedata': True,
+    },
+}
 
 SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
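
The FEATURES_BY_VERSION table above replaces scattered `version & 0xFFFF`
checks with per-version predicates over the header flags. A self-contained
usage sketch (the constants are copied inline so the snippet runs on its
own):

    REVLOGV1 = 1
    FLAG_INLINE_DATA = 1 << 16
    FLAG_GENERALDELTA = 1 << 17

    def _from_flag(flag):
        return lambda flags: bool(flags & flag)

    FEATURES_BY_VERSION = {
        REVLOGV1: {
            b'inline': _from_flag(FLAG_INLINE_DATA),
            b'generaldelta': _from_flag(FLAG_GENERALDELTA),
            b'sidedata': False,
        },
    }

    header_flags = FLAG_INLINE_DATA  # the flags half of a parsed header
    features = FEATURES_BY_VERSION[REVLOGV1]
    assert features[b'inline'](header_flags)
    assert not features[b'generaldelta'](header_flags)
    # 'sidedata' is a plain bool: it depends only on the revlog version,
    # never on the header flags
    assert features[b'sidedata'] is False
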
--- a/mercurial/revlogutils/flagutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/revlogutils/flagutil.py	Mon May 17 15:05:24 2021 +0200
@@ -18,7 +18,6 @@
     REVIDX_HASCOPIESINFO,
     REVIDX_ISCENSORED,
     REVIDX_RAWTEXT_CHANGING_FLAGS,
-    REVIDX_SIDEDATA,
 )
 
 from .. import error, util
@@ -28,7 +27,6 @@
 REVIDX_ISCENSORED
 REVIDX_ELLIPSIS
 REVIDX_EXTSTORED
-REVIDX_SIDEDATA
 REVIDX_HASCOPIESINFO,
 REVIDX_DEFAULT_FLAGS
 REVIDX_FLAGS_ORDER
--- a/mercurial/revlogutils/nodemap.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/revlogutils/nodemap.py	Mon May 17 15:05:24 2021 +0200
@@ -28,9 +28,9 @@
 
 def persisted_data(revlog):
     """read the nodemap for a revlog from disk"""
-    if revlog.nodemap_file is None:
+    if revlog._nodemap_file is None:
         return None
-    pdata = revlog.opener.tryread(revlog.nodemap_file)
+    pdata = revlog.opener.tryread(revlog._nodemap_file)
     if not pdata:
         return None
     offset = 0
@@ -77,11 +77,11 @@
     """
     if revlog._inline:
         return  # inlined revlogs are too small for this to be relevant
-    if revlog.nodemap_file is None:
+    if revlog._nodemap_file is None:
         return  # we do not use persistent_nodemap on this revlog
 
     # this must run after the changelog finalization callback, which uses "cl-"
-    callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file
+    callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog._nodemap_file
     if tr.hasfinalize(callback_id):
         return  # no need to register again
     tr.addpending(
@@ -123,7 +123,7 @@
     """
     if revlog._inline:
         return  # inlined revlogs are too small for this to be relevant
-    if revlog.nodemap_file is None:
+    if revlog._nodemap_file is None:
         return  # we do not use persistent_nodemap on this revlog
 
     notr = _NoTransaction()
@@ -133,11 +133,11 @@
 
 
 def delete_nodemap(tr, repo, revlog):
-    """Delete nodemap data on disk for a given revlog"""
-    if revlog.nodemap_file is None:
+    """ Delete nodemap data on disk for a given revlog"""
+    if revlog._nodemap_file is None:
         msg = "calling persist nodemap on a revlog without the feature enabled"
         raise error.ProgrammingError(msg)
-    repo.svfs.unlink(revlog.nodemap_file)
+    repo.svfs.unlink(revlog._nodemap_file)
 
 
 def persist_nodemap(tr, revlog, pending=False, force=False):
@@ -146,11 +146,9 @@
         raise error.ProgrammingError(
             "cannot persist nodemap of a filtered changelog"
         )
-    if revlog.nodemap_file is None:
+    if revlog._nodemap_file is None:
         if force:
-            revlog.nodemap_file = get_nodemap_file(
-                revlog.opener, revlog.indexfile
-            )
+            revlog._nodemap_file = get_nodemap_file(revlog)
         else:
             msg = "calling persist nodemap on a revlog without the feature enabled"
             raise error.ProgrammingError(msg)
@@ -227,7 +225,7 @@
     target_docket.tip_node = revlog.node(target_docket.tip_rev)
     # EXP-TODO: if this is a cache, this should use a cache vfs, not a
     # store vfs
-    file_path = revlog.nodemap_file
+    file_path = revlog._nodemap_file
     if pending:
         file_path += b'.a'
         tr.registertmp(file_path)
@@ -250,7 +248,7 @@
             for oldfile in olds:
                 realvfs.tryunlink(oldfile)
 
-        callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
+        callback_id = b"revlog-cleanup-nodemap-%s" % revlog._nodemap_file
         tr.addpostclose(callback_id, cleanup)
 
 
@@ -365,15 +363,12 @@
 
 def _rawdata_filepath(revlog, docket):
     """The (vfs relative) nodemap's rawdata file for a given uid"""
-    if revlog.nodemap_file.endswith(b'.n.a'):
-        prefix = revlog.nodemap_file[:-4]
-    else:
-        prefix = revlog.nodemap_file[:-2]
+    prefix = revlog.radix
     return b"%s-%s.nd" % (prefix, docket.uid)
 
 
 def _other_rawdata_filepath(revlog, docket):
-    prefix = revlog.nodemap_file[:-2]
+    prefix = revlog.radix
     pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
     new_file_path = _rawdata_filepath(revlog, docket)
     new_file_name = revlog.opener.basename(new_file_path)
@@ -653,12 +648,9 @@
     return entry
 
 
-def get_nodemap_file(opener, indexfile):
-    if indexfile.endswith(b'.a'):
-        pending_path = indexfile[:-4] + b".n.a"
-        if opener.exists(pending_path):
+def get_nodemap_file(revlog):
+    if revlog.postfix == b'a':
+        pending_path = revlog.radix + b".n.a"
+        if revlog.opener.exists(pending_path):
             return pending_path
-        else:
-            return indexfile[:-4] + b".n"
-    else:
-        return indexfile[:-2] + b".n"
+    return revlog.radix + b".n"
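
With the nodemap paths now derived from `revlog.radix` and `revlog.postfix`
instead of sliced `indexfile` names, the naming rule is easy to state: the
docket lives at `<radix>.n`, with a `<radix>.n.a` variant preferred while a
pending transaction is in flight. A sketch with stand-in objects (FakeOpener
and FakeRevlog only model the attributes the helper reads; they are not the
real APIs):

    class FakeOpener(object):
        def __init__(self, existing):
            self._existing = set(existing)

        def exists(self, path):
            return path in self._existing

    class FakeRevlog(object):
        def __init__(self, radix, postfix=None, existing=()):
            self.radix = radix
            self.postfix = postfix
            self.opener = FakeOpener(existing)

    def get_nodemap_file(revlog):
        # same logic as the rewritten helper above
        if revlog.postfix == b'a':
            pending_path = revlog.radix + b".n.a"
            if revlog.opener.exists(pending_path):
                return pending_path
        return revlog.radix + b".n"

    assert get_nodemap_file(FakeRevlog(b'00changelog')) == b'00changelog.n'
    pending = FakeRevlog(
        b'00changelog', postfix=b'a', existing={b'00changelog.n.a'}
    )
    assert get_nodemap_file(pending) == b'00changelog.n.a'
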
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/revlogv0.py	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,146 @@
+# revlogv0 - code related to revlog format "V0"
+#
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+
+from ..node import sha1nodeconstants
+from .constants import (
+    INDEX_ENTRY_V0,
+)
+from ..i18n import _
+
+from .. import (
+    error,
+    node,
+    pycompat,
+    util,
+)
+
+from . import (
+    flagutil,
+    nodemap as nodemaputil,
+)
+
+
+def getoffset(q):
+    return int(q >> 16)
+
+
+def gettype(q):
+    return int(q & 0xFFFF)
+
+
+def offset_type(offset, type):
+    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
+        raise ValueError(b'unknown revlog index flags')
+    return int(int(offset) << 16 | type)
+
+
+class revlogoldindex(list):
+    entry_size = INDEX_ENTRY_V0.size
+
+    @property
+    def nodemap(self):
+        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
+        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+        return self._nodemap
+
+    @util.propertycache
+    def _nodemap(self):
+        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
+        for r in range(0, len(self)):
+            n = self[r][7]
+            nodemap[n] = r
+        return nodemap
+
+    def has_node(self, node):
+        """return True if the node exist in the index"""
+        return node in self._nodemap
+
+    def rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, raise a RevlogError"""
+        return self._nodemap[node]
+
+    def get_rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, return None"""
+        return self._nodemap.get(node)
+
+    def append(self, tup):
+        self._nodemap[tup[7]] = len(self)
+        super(revlogoldindex, self).append(tup)
+
+    def __delitem__(self, i):
+        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
+            raise ValueError(b"deleting slices only supports a:-1 with step 1")
+        for r in pycompat.xrange(i.start, len(self)):
+            del self._nodemap[self[r][7]]
+        super(revlogoldindex, self).__delitem__(i)
+
+    def clearcaches(self):
+        self.__dict__.pop('_nodemap', None)
+
+    def __getitem__(self, i):
+        if i == -1:
+            return (0, 0, 0, -1, -1, -1, -1, node.nullid)
+        return list.__getitem__(self, i)
+
+    def pack_header(self, header):
+        """pack header information in binary"""
+        return b''
+
+    def entry_binary(self, rev):
+        """return the raw binary string representing a revision"""
+        entry = self[rev]
+        if gettype(entry[0]):
+            raise error.RevlogError(
+                _(b'index entry flags need revlog version 1')
+            )
+        e2 = (
+            getoffset(entry[0]),
+            entry[1],
+            entry[3],
+            entry[4],
+            self[entry[5]][7],
+            self[entry[6]][7],
+            entry[7],
+        )
+        return INDEX_ENTRY_V0.pack(*e2)
+
+
+def parse_index_v0(data, inline):
+    s = INDEX_ENTRY_V0.size
+    index = []
+    nodemap = nodemaputil.NodeMap({node.nullid: node.nullrev})
+    n = off = 0
+    l = len(data)
+    while off + s <= l:
+        cur = data[off : off + s]
+        off += s
+        e = INDEX_ENTRY_V0.unpack(cur)
+        # transform to revlogv1 format
+        e2 = (
+            offset_type(e[0], 0),
+            e[1],
+            -1,
+            e[2],
+            e[3],
+            nodemap.get(e[4], node.nullrev),
+            nodemap.get(e[5], node.nullrev),
+            e[6],
+            0,  # no side data support
+            0,  # no side data support
+        )
+        index.append(e2)
+        nodemap[e[6]] = n
+        n += 1
+
+    index = revlogoldindex(index)
+    return index, None
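
The V0 parser above keeps the V1 in-memory convention of packing the data
offset and the type flags into a single integer: offset in the high bits,
flags in the low 16. A quick round-trip check of that packing (the
REVIDX_KNOWN_FLAGS validation is left out for brevity):

    def offset_type(offset, type):
        return int(int(offset) << 16 | type)

    def getoffset(q):
        return int(q >> 16)

    def gettype(q):
        return int(q & 0xFFFF)

    packed = offset_type(1234, 0x0003)
    assert getoffset(packed) == 1234
    assert gettype(packed) == 0x0003
    # V0 entries never carry flags, which is why entry_binary() above
    # refuses to serialize an entry whose type bits are non-zero
    assert gettype(offset_type(1234, 0)) == 0
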
--- a/mercurial/revlogutils/sidedata.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/revlogutils/sidedata.py	Mon May 17 15:05:24 2021 +0200
@@ -32,9 +32,11 @@
 
 from __future__ import absolute_import
 
+import collections
 import struct
 
-from .. import error
+from .. import error, requirements as requirementsmod
+from ..revlogutils import constants, flagutil
 from ..utils import hashutil
 
 ## sidedata type constant
@@ -91,3 +93,83 @@
         sidedata[key] = entrytext
         dataoffset = nextdataoffset
     return sidedata
+
+
+def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
+    """
+    Returns a dictionary mapping revlog types to tuples of
+    `(repo, computers, removers)`:
+        * `repo` is used as an argument for computers
+        * `computers` is a list of `(category, (keys, computer, flags))` that
+           compute the missing sidedata categories that were asked:
+           * `category` is the sidedata category
+           * `keys` are the sidedata keys to be affected
+           * `flags` is a bitmask (an integer) of flags to remove when
+              removing the category.
+           * `computer` is the function `(repo, store, rev, sidedata)` that
+             returns a tuple of
+             `(new sidedata dict, (flags to add, flags to remove))`.
+             For example, it will return `({}, (0, 1 << 15))` to return no
+             sidedata, with no flags to add and one flag to remove.
+        * `removers` will remove the keys corresponding to the categories
+          that are present, but not needed.
+        If both `computers` and `removers` are empty, sidedata will simply not
+        be transformed.
+    """
+    # Computers for computing sidedata on-the-fly
+    sd_computers = collections.defaultdict(list)
+    # Computers used to remove no-longer-wanted sidedata categories
+    sd_removers = collections.defaultdict(list)
+    to_generate = remote_sd_categories - repo._wanted_sidedata
+    to_remove = repo._wanted_sidedata - remote_sd_categories
+    if pull:
+        to_generate, to_remove = to_remove, to_generate
+
+    for revlog_kind, computers in repo._sidedata_computers.items():
+        for category, computer in computers.items():
+            if category in to_generate:
+                sd_computers[revlog_kind].append(computer)
+            if category in to_remove:
+                sd_removers[revlog_kind].append(computer)
+
+    sidedata_helpers = (repo, sd_computers, sd_removers)
+    return sidedata_helpers
+
+
+def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
+    """Returns the sidedata for the given revision after running through
+    the given helpers.
+    - `store`: the revlog this applies to (changelog, manifest, or filelog
+      instance)
+    - `sidedata_helpers`: see `get_sidedata_helpers`
+    - `sidedata`: previous sidedata at the given rev, if any
+    - `rev`: affected rev of `store`
+    """
+    repo, sd_computers, sd_removers = sidedata_helpers
+    kind = store.revlog_kind
+    flags_to_add = 0
+    flags_to_remove = 0
+    for _keys, sd_computer, _flags in sd_computers.get(kind, []):
+        sidedata, flags = sd_computer(repo, store, rev, sidedata)
+        flags_to_add |= flags[0]
+        flags_to_remove |= flags[1]
+    for keys, _computer, flags in sd_removers.get(kind, []):
+        for key in keys:
+            sidedata.pop(key, None)
+        flags_to_remove |= flags
+    return sidedata, (flags_to_add, flags_to_remove)
+
+
+def set_sidedata_spec_for_repo(repo):
+    # prevent cycle metadata -> revlogutils.sidedata -> metadata
+    from .. import metadata
+
+    if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
+        repo.register_wanted_sidedata(SD_FILES)
+    repo.register_sidedata_computer(
+        constants.KIND_CHANGELOG,
+        SD_FILES,
+        (SD_FILES,),
+        metadata.copies_sidedata_computer,
+        flagutil.REVIDX_HASCOPIESINFO,
+    )
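
`run_sidedata_helpers` folds every computer's `(flags to add, flags to
remove)` pair into two aggregate bitmasks that the caller (for example
`rewrite_sidedata` above) applies to the index entry. A self-contained
sketch of that bookkeeping; SD_EXAMPLE, FLAG_EXAMPLE and example_computer
are hypothetical stand-ins:

    import collections

    SD_EXAMPLE = 42
    FLAG_EXAMPLE = 1 << 15
    KIND_CHANGELOG = 1001

    def example_computer(repo, store, rev, sidedata):
        # returns (new sidedata, (flags to add, flags to remove))
        sidedata[SD_EXAMPLE] = b'payload for rev %d' % rev
        return sidedata, (FLAG_EXAMPLE, 0)

    class FakeStore(object):
        revlog_kind = KIND_CHANGELOG

    def run(store, helpers, sidedata, rev):
        # mirrors the loops in run_sidedata_helpers above
        repo, computers, removers = helpers
        add, remove = 0, 0
        for _keys, computer, _flags in computers.get(store.revlog_kind, []):
            sidedata, flags = computer(repo, store, rev, sidedata)
            add |= flags[0]
            remove |= flags[1]
        for keys, _computer, flags in removers.get(store.revlog_kind, []):
            for key in keys:
                sidedata.pop(key, None)
            remove |= flags
        return sidedata, (add, remove)

    sd_computers = collections.defaultdict(list)
    sd_computers[KIND_CHANGELOG].append(
        ((SD_EXAMPLE,), example_computer, FLAG_EXAMPLE)
    )
    helpers = (None, sd_computers, collections.defaultdict(list))

    sd, (add, remove) = run(FakeStore(), helpers, {}, rev=7)
    assert sd[SD_EXAMPLE] == b'payload for rev 7'
    assert (add, remove) == (FLAG_EXAMPLE, 0)
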
--- a/mercurial/revset.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/revset.py	Mon May 17 15:05:24 2021 +0200
@@ -1724,7 +1724,7 @@
 def _node(repo, n):
     """process a node input"""
     rn = None
-    if len(n) == 40:
+    if len(n) == 2 * repo.nodeconstants.nodelen:
         try:
             rn = repo.changelog.rev(bin(n))
         except error.WdirUnsupported:
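
Replacing the hard-coded `40` with `2 * repo.nodeconstants.nodelen` keeps
this working if nodes ever use a hash other than SHA-1. For today's SHA-1
based repos the two are the same number:

    import hashlib

    # a node is a raw SHA-1 digest, so its full hex form is twice as long
    nodelen = hashlib.sha1().digest_size
    assert nodelen == 20
    assert 2 * nodelen == 40  # the length _node() used to hard-code
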
--- a/mercurial/rewriteutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/rewriteutil.py	Mon May 17 15:05:24 2021 +0200
@@ -17,16 +17,38 @@
 
 from . import (
     error,
+    node,
     obsolete,
     obsutil,
     revset,
     scmutil,
+    util,
 )
 
 
 NODE_RE = re.compile(br'\b[0-9a-f]{6,64}\b')
 
 
+def _formatrevs(repo, revs, maxrevs=4):
+    """returns a string summarizing revisions in a decent size
+
+    If there are few enough revisions, we list them all. Otherwise we display a
+    summary of the form:
+
+        1ea73414a91b and 5 others
+    """
+    tonode = repo.changelog.node
+    numrevs = len(revs)
+    if numrevs < maxrevs:
+        shorts = [node.short(tonode(r)) for r in revs]
+        summary = b', '.join(shorts)
+    else:
+        first = revs.first()
+        summary = _(b'%s and %d others')
+        summary %= (node.short(tonode(first)), numrevs - 1)
+    return summary
+
+
 def precheck(repo, revs, action=b'rewrite'):
     """check if revs can be rewritten
     action is used to control the error message.
@@ -34,22 +56,66 @@
     Make sure this function is called after taking the lock.
     """
     if nullrev in revs:
-        msg = _(b"cannot %s null changeset") % action
+        msg = _(b"cannot %s the null revision") % action
         hint = _(b"no changeset checked out")
         raise error.InputError(msg, hint=hint)
 
+    if any(util.safehasattr(r, 'rev') for r in revs):
+        repo.ui.develwarn(b"rewriteutil.precheck called with ctx not revs")
+        revs = (r.rev() for r in revs)
+
     if len(repo[None].parents()) > 1:
-        raise error.StateError(_(b"cannot %s while merging") % action)
+        raise error.StateError(
+            _(b"cannot %s changesets while merging") % action
+        )
 
     publicrevs = repo.revs(b'%ld and public()', revs)
     if publicrevs:
-        msg = _(b"cannot %s public changesets") % action
+        summary = _formatrevs(repo, publicrevs)
+        msg = _(b"cannot %s public changesets: %s") % (action, summary)
         hint = _(b"see 'hg help phases' for details")
         raise error.InputError(msg, hint=hint)
 
     newunstable = disallowednewunstable(repo, revs)
     if newunstable:
-        raise error.InputError(_(b"cannot %s changeset with children") % action)
+        hint = _(b"see 'hg help evolution.instability'")
+        raise error.InputError(
+            _(b"cannot %s changeset, as that will orphan %d descendants")
+            % (action, len(newunstable)),
+            hint=hint,
+        )
+
+    if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
+        new_divergence = _find_new_divergence(repo, revs)
+        if new_divergence:
+            local_ctx, other_ctx, base_ctx = new_divergence
+            msg = _(
+                b'cannot %s %s, as that creates content-divergence with %s'
+            ) % (
+                action,
+                local_ctx,
+                other_ctx,
+            )
+            if local_ctx.rev() != base_ctx.rev():
+                msg += _(b', from %s') % base_ctx
+            if repo.ui.verbose:
+                if local_ctx.rev() != base_ctx.rev():
+                    msg += _(
+                        b'\n    changeset %s is a successor of ' b'changeset %s'
+                    ) % (local_ctx, base_ctx)
+                msg += _(
+                    b'\n    changeset %s already has a successor in '
+                    b'changeset %s\n'
+                    b'    rewriting changeset %s would create '
+                    b'"content-divergence"\n'
+                    b'    set experimental.evolution.allowdivergence=True to '
+                    b'skip this check'
+                ) % (base_ctx, other_ctx, local_ctx)
+                raise error.InputError(msg)
+            else:
+                raise error.InputError(
+                    msg, hint=_(b"add --verbose for details")
+                )
 
 
 def disallowednewunstable(repo, revs):
@@ -65,6 +131,40 @@
     return repo.revs(b"(%ld::) - %ld", revs, revs)
 
 
+def _find_new_divergence(repo, revs):
+    obsrevs = repo.revs(b'%ld and obsolete()', revs)
+    for r in obsrevs:
+        div = find_new_divergence_from(repo, repo[r])
+        if div:
+            return (repo[r], repo[div[0]], repo[div[1]])
+    return None
+
+
+def find_new_divergence_from(repo, ctx):
+    """return divergent revision if rewriting an obsolete cset (ctx) will
+    create divergence
+
+    Returns (<other node>, <common ancestor node>) or None
+    """
+    if not ctx.obsolete():
+        return None
+    # We need to check two cases that can cause divergence:
+    # case 1: the rev being rewritten has a non-obsolete successor (easily
+    #     detected by successorssets)
+    sset = obsutil.successorssets(repo, ctx.node())
+    if sset:
+        return (sset[0][0], ctx.node())
+    else:
+        # case 2: one of the precursors of the rev being revived has a
+        #     non-obsolete successor (we need divergentsets for this)
+        divsets = obsutil.divergentsets(repo, ctx)
+        if divsets:
+            nsuccset = divsets[0][b'divergentnodes']
+            prec = divsets[0][b'commonpredecessor']
+            return (nsuccset[0], prec)
+        return None
+
+
 def skip_empty_successor(ui, command):
     empty_successor = ui.config(b'rewrite', b'empty-successor')
     if empty_successor == b'skip':
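
`_formatrevs` keeps the new error messages above bounded: fewer than
`maxrevs` revisions are listed in full, anything larger collapses to
"<first> and N others". A sketch with precomputed short hashes standing in
for the repo/changelog lookups:

    def format_revs(shorts, maxrevs=4):
        # mirrors _formatrevs() above, minus the node lookups
        if len(shorts) < maxrevs:
            return b', '.join(shorts)
        return b'%s and %d others' % (shorts[0], len(shorts) - 1)

    assert format_revs([b'1ea73414a91b']) == b'1ea73414a91b'
    many = [b'aaa111', b'bbb222', b'ccc333', b'ddd444', b'eee555']
    assert format_revs(many) == b'aaa111 and 4 others'
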
--- a/mercurial/scmutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/scmutil.py	Mon May 17 15:05:24 2021 +0200
@@ -19,10 +19,8 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
-    wdirid,
     wdirrev,
 )
 from .pycompat import getattr
@@ -450,7 +448,7 @@
     """Return binary node id for a given basectx"""
     node = ctx.node()
     if node is None:
-        return wdirid
+        return ctx.repo().nodeconstants.wdirid
     return node
 
 
@@ -645,7 +643,7 @@
         except (ValueError, OverflowError, IndexError):
             pass
 
-        if len(symbol) == 40:
+        if len(symbol) == 2 * repo.nodeconstants.nodelen:
             try:
                 node = bin(symbol)
                 rev = repo.changelog.rev(node)
@@ -1108,7 +1106,7 @@
                     if roots:
                         newnode = roots[0].node()
                     else:
-                        newnode = nullid
+                        newnode = repo.nullid
                 else:
                     newnode = newnodes[0]
                 moves[oldnode] = newnode
@@ -1506,7 +1504,7 @@
     oldctx = repo[b'.']
     ds = repo.dirstate
     copies = dict(ds.copies())
-    ds.setparents(newctx.node(), nullid)
+    ds.setparents(newctx.node(), repo.nullid)
     s = newctx.status(oldctx, match=match)
     for f in s.modified:
         if ds[f] == b'r':
--- a/mercurial/setdiscovery.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/setdiscovery.py	Mon May 17 15:05:24 2021 +0200
@@ -46,10 +46,7 @@
 import random
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from . import (
     error,
     policy,
@@ -391,9 +388,9 @@
             audit[b'total-roundtrips'] = 1
 
         if cl.tiprev() == nullrev:
-            if srvheadhashes != [nullid]:
-                return [nullid], True, srvheadhashes
-            return [nullid], False, []
+            if srvheadhashes != [cl.nullid]:
+                return [cl.nullid], True, srvheadhashes
+            return [cl.nullid], False, []
     else:
         # we still need the remote head for the function return
         with remote.commandexecutor() as e:
@@ -406,7 +403,7 @@
 
     knownsrvheads = []  # revnos of remote heads that are known locally
     for node in srvheadhashes:
-        if node == nullid:
+        if node == cl.nullid:
             continue
 
         try:
@@ -503,17 +500,17 @@
     if audit is not None:
         audit[b'total-roundtrips'] = roundtrips
 
-    if not result and srvheadhashes != [nullid]:
+    if not result and srvheadhashes != [cl.nullid]:
         if abortwhenunrelated:
             raise error.Abort(_(b"repository is unrelated"))
         else:
             ui.warn(_(b"warning: repository is unrelated\n"))
         return (
-            {nullid},
+            {cl.nullid},
             True,
             srvheadhashes,
         )
 
-    anyincoming = srvheadhashes != [nullid]
+    anyincoming = srvheadhashes != [cl.nullid]
     result = {clnode(r) for r in result}
     return result, anyincoming, srvheadhashes
--- a/mercurial/shelve.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/shelve.py	Mon May 17 15:05:24 2021 +0200
@@ -31,7 +31,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from . import (
@@ -822,7 +821,7 @@
         pendingctx = state.pendingctx
 
         with repo.dirstate.parentchange():
-            repo.setparents(state.pendingctx.node(), nullid)
+            repo.setparents(state.pendingctx.node(), repo.nullid)
             repo.dirstate.write(repo.currenttransaction())
 
         targetphase = phases.internal
@@ -831,7 +830,7 @@
         overrides = {(b'phases', b'new-commit'): targetphase}
         with repo.ui.configoverride(overrides, b'unshelve'):
             with repo.dirstate.parentchange():
-                repo.setparents(state.parents[0], nullid)
+                repo.setparents(state.parents[0], repo.nullid)
                 newnode, ispartialunshelve = _createunshelvectx(
                     ui, repo, shelvectx, basename, interactive, opts
                 )
@@ -1027,7 +1026,7 @@
             raise error.ConflictResolutionRequired(b'unshelve')
 
         with repo.dirstate.parentchange():
-            repo.setparents(tmpwctx.node(), nullid)
+            repo.setparents(tmpwctx.node(), repo.nullid)
             newnode, ispartialunshelve = _createunshelvectx(
                 ui, repo, shelvectx, basename, interactive, opts
             )
--- a/mercurial/sparse.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/sparse.py	Mon May 17 15:05:24 2021 +0200
@@ -10,10 +10,7 @@
 import os
 
 from .i18n import _
-from .node import (
-    hex,
-    nullid,
-)
+from .node import hex
 from . import (
     error,
     match as matchmod,
@@ -177,7 +174,7 @@
     revs = [
         repo.changelog.rev(node)
         for node in repo.dirstate.parents()
-        if node != nullid
+        if node != repo.nullid
     ]
 
     allincludes = set()
@@ -321,7 +318,7 @@
         revs = [
             repo.changelog.rev(node)
             for node in repo.dirstate.parents()
-            if node != nullid
+            if node != repo.nullid
         ]
 
     signature = configsignature(repo, includetemp=includetemp)
--- a/mercurial/statichttprepo.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/statichttprepo.py	Mon May 17 15:05:24 2021 +0200
@@ -177,6 +177,7 @@
         self.filtername = None
         self._extrafilterid = None
         self._wanted_sidedata = set()
+        self.features = set()
 
         try:
             requirements = set(self.vfs.read(b'requires').splitlines())
--- a/mercurial/store.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/store.py	Mon May 17 15:05:24 2021 +0200
@@ -706,7 +706,7 @@
             # do not trigger a fncache load when adding a file that already is
             # known to exist.
             notload = self.fncache.entries is None and self.vfs.exists(encoded)
-            if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
+            if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
                 # when appending to an existing file, if the file has size zero,
                 # it should be considered as missing. Such zero-size files are
                 # the result of truncation when a transaction is aborted.
--- a/mercurial/strip.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/strip.py	Mon May 17 15:05:24 2021 +0200
@@ -2,7 +2,6 @@
 
 from .i18n import _
 from .pycompat import getattr
-from .node import nullid
 from . import (
     bookmarks as bookmarksmod,
     cmdutil,
@@ -39,7 +38,7 @@
 
     if (
         util.safehasattr(repo, b'mq')
-        and p2 != nullid
+        and p2 != repo.nullid
         and p2 in [x.node for x in repo.mq.applied]
     ):
         unode = p2
@@ -218,7 +217,7 @@
         # if one of the wdir parent is stripped we'll need
         # to update away to an earlier revision
         update = any(
-            p != nullid and cl.rev(p) in strippedrevs
+            p != repo.nullid and cl.rev(p) in strippedrevs
             for p in repo.dirstate.parents()
         )
 
--- a/mercurial/subrepo.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/subrepo.py	Mon May 17 15:05:24 2021 +0200
@@ -21,7 +21,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     short,
 )
 from . import (
@@ -686,7 +685,7 @@
         # we can't fully delete the repository as it may contain
         # local-only history
         self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
-        hg.clean(self._repo, nullid, False)
+        hg.clean(self._repo, self._repo.nullid, False)
 
     def _get(self, state):
         source, revision, kind = state
--- a/mercurial/tagmerge.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/tagmerge.py	Mon May 17 15:05:24 2021 +0200
@@ -74,9 +74,6 @@
 from __future__ import absolute_import
 
 from .i18n import _
-from .node import (
-    nullhex,
-)
 from . import (
     tags as tagsmod,
     util,
@@ -243,8 +240,8 @@
         pnlosttagset = basetagset - pntagset
         for t in pnlosttagset:
             pntags[t] = basetags[t]
-            if pntags[t][-1][0] != nullhex:
-                pntags[t].append([nullhex, None])
+            if pntags[t][-1][0] != repo.nodeconstants.nullhex:
+                pntags[t].append([repo.nodeconstants.nullhex, None])
 
     conflictedtags = []  # for reporting purposes
     mergedtags = util.sortdict(p1tags)
--- a/mercurial/tags.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/tags.py	Mon May 17 15:05:24 2021 +0200
@@ -18,7 +18,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -96,12 +95,12 @@
     return fnodes
 
 
-def _nulltonone(value):
+def _nulltonone(repo, value):
     """convert nullid to None
 
     For tag value, nullid means "deleted". This small utility function helps
     translating that to None."""
-    if value == nullid:
+    if value == repo.nullid:
         return None
     return value
 
@@ -123,14 +122,14 @@
     # list of (tag, old, new): None means missing
     entries = []
     for tag, (new, __) in newtags.items():
-        new = _nulltonone(new)
+        new = _nulltonone(repo, new)
         old, __ = oldtags.pop(tag, (None, None))
-        old = _nulltonone(old)
+        old = _nulltonone(repo, old)
         if old != new:
             entries.append((tag, old, new))
     # handle deleted tags
     for tag, (old, __) in oldtags.items():
-        old = _nulltonone(old)
+        old = _nulltonone(repo, old)
         if old is not None:
             entries.append((tag, old, None))
     entries.sort()
@@ -452,7 +451,7 @@
     repoheads = repo.heads()
     # Case 2 (uncommon): empty repo; get out quickly and don't bother
     # writing an empty cache.
-    if repoheads == [nullid]:
+    if repoheads == [repo.nullid]:
         return ([], {}, valid, {}, False)
 
     # Case 3 (uncommon): cache file missing or empty.
@@ -499,7 +498,7 @@
     for node in nodes:
         fnode = fnodescache.getfnode(node)
         flog = repo.file(b'.hgtags')
-        if fnode != nullid:
+        if fnode != repo.nullid:
             if fnode not in validated_fnodes:
                 if flog.hasnode(fnode):
                     validated_fnodes.add(fnode)
@@ -510,7 +509,7 @@
     if unknown_entries:
         fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
         for node, fnode in pycompat.iteritems(fixed_nodemap):
-            if fnode != nullid:
+            if fnode != repo.nullid:
                 cachefnode[node] = fnode
 
     fnodescache.write()
@@ -632,7 +631,7 @@
                 m = name
 
             if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
-                old = repo.tags().get(name, nullid)
+                old = repo.tags().get(name, repo.nullid)
                 fp.write(b'%s %s\n' % (hex(old), m))
             fp.write(b'%s %s\n' % (hex(node), m))
         fp.close()
@@ -762,8 +761,8 @@
         If an .hgtags does not exist at the specified revision, nullid is
         returned.
         """
-        if node == nullid:
-            return nullid
+        if node == self._repo.nullid:
+            return node
 
         ctx = self._repo[node]
         rev = ctx.rev()
@@ -826,7 +825,7 @@
                 fnode = ctx.filenode(b'.hgtags')
             except error.LookupError:
                 # No .hgtags file on this revision.
-                fnode = nullid
+                fnode = self._repo.nullid
         return fnode
 
     def setfnode(self, node, fnode):
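
Threading the repo into `_nulltonone` matters because "which node is null"
is now a per-repo question. The convention itself is unchanged: in tag data
the null node means "this tag was deleted". A sketch with the SHA-1 null
node spelled out (repo.nullid in the real code):

    NULLID = b'\0' * 20  # SHA-1 null node

    def nulltonone(nullid, value):
        return None if value == nullid else value

    assert nulltonone(NULLID, NULLID) is None
    assert nulltonone(NULLID, b'\x01' * 20) == b'\x01' * 20
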
--- a/mercurial/templatefuncs.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/templatefuncs.py	Mon May 17 15:05:24 2021 +0200
@@ -10,10 +10,7 @@
 import re
 
 from .i18n import _
-from .node import (
-    bin,
-    wdirid,
-)
+from .node import bin
 from . import (
     color,
     dagop,
@@ -767,9 +764,10 @@
         )
 
     repo = context.resource(mapping, b'repo')
-    if len(hexnode) > 40:
+    hexnodelen = 2 * repo.nodeconstants.nodelen
+    if len(hexnode) > hexnodelen:
         return hexnode
-    elif len(hexnode) == 40:
+    elif len(hexnode) == hexnodelen:
         try:
             node = bin(hexnode)
         except TypeError:
@@ -778,7 +776,7 @@
         try:
             node = scmutil.resolvehexnodeidprefix(repo, hexnode)
         except error.WdirUnsupported:
-            node = wdirid
+            node = repo.nodeconstants.wdirid
         except error.LookupError:
             return hexnode
         if not node:
--- a/mercurial/templatekw.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/templatekw.py	Mon May 17 15:05:24 2021 +0200
@@ -10,8 +10,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
-    wdirid,
     wdirrev,
 )
 
@@ -29,7 +27,10 @@
     templateutil,
     util,
 )
-from .utils import stringutil
+from .utils import (
+    stringutil,
+    urlutil,
+)
 
 _hybrid = templateutil.hybrid
 hybriddict = templateutil.hybriddict
@@ -412,7 +413,7 @@
 
 def getgraphnodecurrent(repo, ctx, cache):
     wpnodes = repo.dirstate.parents()
-    if wpnodes[1] == nullid:
+    if wpnodes[1] == repo.nullid:
         wpnodes = wpnodes[:1]
     if ctx.node() in wpnodes:
         return b'@'
@@ -525,11 +526,12 @@
     ctx = context.resource(mapping, b'ctx')
     mnode = ctx.manifestnode()
     if mnode is None:
-        mnode = wdirid
+        mnode = repo.nodeconstants.wdirid
         mrev = wdirrev
+        mhex = repo.nodeconstants.wdirhex
     else:
         mrev = repo.manifestlog.rev(mnode)
-    mhex = hex(mnode)
+        mhex = hex(mnode)
     mapping = context.overlaymap(mapping, {b'rev': mrev, b'node': mhex})
     f = context.process(b'manifest', mapping)
     return templateutil.hybriditem(
@@ -661,17 +663,29 @@
     repo = context.resource(mapping, b'repo')
     # see commands.paths() for naming of dictionary keys
     paths = repo.ui.paths
-    urls = util.sortdict(
-        (k, p.rawloc) for k, p in sorted(pycompat.iteritems(paths))
-    )
+    all_paths = urlutil.list_paths(repo.ui)
+    urls = util.sortdict((k, p.rawloc) for k, p in all_paths)
 
     def makemap(k):
-        p = paths[k]
-        d = {b'name': k, b'url': p.rawloc}
-        d.update((o, v) for o, v in sorted(pycompat.iteritems(p.suboptions)))
+        ps = paths[k]
+        d = {b'name': k}
+        if len(ps) == 1:
+            d[b'url'] = ps[0].rawloc
+            sub_opts = pycompat.iteritems(ps[0].suboptions)
+            sub_opts = util.sortdict(sorted(sub_opts))
+            d.update(sub_opts)
+        path_dict = util.sortdict()
+        for p in ps:
+            sub_opts = util.sortdict(sorted(pycompat.iteritems(p.suboptions)))
+            path_dict[b'url'] = p.rawloc
+            path_dict.update(sub_opts)
+            d[b'urls'] = [path_dict]
         return d
 
-    return _hybrid(None, urls, makemap, lambda k: b'%s=%s' % (k, urls[k]))
+    def format_one(k):
+        return b'%s=%s' % (k, urls[k])
+
+    return _hybrid(None, urls, makemap, format_one)
 
 
 @templatekeyword(b"predecessors", requires={b'repo', b'ctx'})
--- a/mercurial/testing/storage.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/testing/storage.py	Mon May 17 15:05:24 2021 +0200
@@ -11,7 +11,6 @@
 
 from ..node import (
     hex,
-    nullid,
     nullrev,
 )
 from ..pycompat import getattr
@@ -51,7 +50,7 @@
         self.assertFalse(f.hasnode(None))
         self.assertFalse(f.hasnode(0))
         self.assertFalse(f.hasnode(nullrev))
-        self.assertFalse(f.hasnode(nullid))
+        self.assertFalse(f.hasnode(f.nullid))
         self.assertFalse(f.hasnode(b'0'))
         self.assertFalse(f.hasnode(b'a' * 20))
 
@@ -64,8 +63,8 @@
 
         self.assertEqual(list(f.revs(start=20)), [])
 
-        # parents() and parentrevs() work with nullid/nullrev.
-        self.assertEqual(f.parents(nullid), (nullid, nullid))
+        # parents() and parentrevs() work with f.nullid/nullrev.
+        self.assertEqual(f.parents(f.nullid), (f.nullid, f.nullid))
         self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
 
         with self.assertRaises(error.LookupError):
@@ -78,9 +77,9 @@
             with self.assertRaises(IndexError):
                 f.parentrevs(i)
 
-        # nullid/nullrev lookup always works.
-        self.assertEqual(f.rev(nullid), nullrev)
-        self.assertEqual(f.node(nullrev), nullid)
+        # f.nullid/nullrev lookup always works.
+        self.assertEqual(f.rev(f.nullid), nullrev)
+        self.assertEqual(f.node(nullrev), f.nullid)
 
         with self.assertRaises(error.LookupError):
             f.rev(b'\x01' * 20)
@@ -92,16 +91,16 @@
             with self.assertRaises(IndexError):
                 f.node(i)
 
-        self.assertEqual(f.lookup(nullid), nullid)
-        self.assertEqual(f.lookup(nullrev), nullid)
-        self.assertEqual(f.lookup(hex(nullid)), nullid)
-        self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
+        self.assertEqual(f.lookup(f.nullid), f.nullid)
+        self.assertEqual(f.lookup(nullrev), f.nullid)
+        self.assertEqual(f.lookup(hex(f.nullid)), f.nullid)
+        self.assertEqual(f.lookup(b'%d' % nullrev), f.nullid)
 
         with self.assertRaises(error.LookupError):
             f.lookup(b'badvalue')
 
         with self.assertRaises(error.LookupError):
-            f.lookup(hex(nullid)[0:12])
+            f.lookup(hex(f.nullid)[0:12])
 
         with self.assertRaises(error.LookupError):
             f.lookup(b'-2')
@@ -140,19 +139,19 @@
             with self.assertRaises(IndexError):
                 f.iscensored(i)
 
-        self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
+        self.assertEqual(list(f.commonancestorsheads(f.nullid, f.nullid)), [])
 
         with self.assertRaises(ValueError):
             self.assertEqual(list(f.descendants([])), [])
 
         self.assertEqual(list(f.descendants([nullrev])), [])
 
-        self.assertEqual(f.heads(), [nullid])
-        self.assertEqual(f.heads(nullid), [nullid])
-        self.assertEqual(f.heads(None, [nullid]), [nullid])
-        self.assertEqual(f.heads(nullid, [nullid]), [nullid])
+        self.assertEqual(f.heads(), [f.nullid])
+        self.assertEqual(f.heads(f.nullid), [f.nullid])
+        self.assertEqual(f.heads(None, [f.nullid]), [f.nullid])
+        self.assertEqual(f.heads(f.nullid, [f.nullid]), [f.nullid])
 
-        self.assertEqual(f.children(nullid), [])
+        self.assertEqual(f.children(f.nullid), [])
 
         with self.assertRaises(error.LookupError):
             f.children(b'\x01' * 20)
@@ -160,7 +159,7 @@
     def testsinglerevision(self):
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node = f.add(b'initial', None, tr, 0, nullid, nullid)
+            node = f.add(b'initial', None, tr, 0, f.nullid, f.nullid)
 
         self.assertEqual(len(f), 1)
         self.assertEqual(list(f), [0])
@@ -174,7 +173,7 @@
         self.assertTrue(f.hasnode(node))
         self.assertFalse(f.hasnode(hex(node)))
         self.assertFalse(f.hasnode(nullrev))
-        self.assertFalse(f.hasnode(nullid))
+        self.assertFalse(f.hasnode(f.nullid))
         self.assertFalse(f.hasnode(node[0:12]))
         self.assertFalse(f.hasnode(hex(node)[0:20]))
 
@@ -188,7 +187,7 @@
         self.assertEqual(list(f.revs(1, 0)), [1, 0])
         self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
 
-        self.assertEqual(f.parents(node), (nullid, nullid))
+        self.assertEqual(f.parents(node), (f.nullid, f.nullid))
         self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
 
         with self.assertRaises(error.LookupError):
@@ -209,7 +208,7 @@
 
         self.assertEqual(f.lookup(node), node)
         self.assertEqual(f.lookup(0), node)
-        self.assertEqual(f.lookup(-1), nullid)
+        self.assertEqual(f.lookup(-1), f.nullid)
         self.assertEqual(f.lookup(b'0'), node)
         self.assertEqual(f.lookup(hex(node)), node)
 
@@ -256,9 +255,9 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
-            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+            node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
 
         self.assertEqual(len(f), 3)
         self.assertEqual(list(f), [0, 1, 2])
@@ -284,9 +283,9 @@
         # TODO this is wrong
         self.assertEqual(list(f.revs(3, 2)), [3, 2])
 
-        self.assertEqual(f.parents(node0), (nullid, nullid))
-        self.assertEqual(f.parents(node1), (node0, nullid))
-        self.assertEqual(f.parents(node2), (node1, nullid))
+        self.assertEqual(f.parents(node0), (f.nullid, f.nullid))
+        self.assertEqual(f.parents(node1), (node0, f.nullid))
+        self.assertEqual(f.parents(node2), (node1, f.nullid))
 
         self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
         self.assertEqual(f.parentrevs(1), (0, nullrev))
@@ -330,7 +329,7 @@
         with self.assertRaises(IndexError):
             f.iscensored(3)
 
-        self.assertEqual(f.commonancestorsheads(node1, nullid), [])
+        self.assertEqual(f.commonancestorsheads(node1, f.nullid), [])
         self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
         self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
         self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
@@ -364,12 +363,12 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'1', None, tr, 1, node0, nullid)
-            node2 = f.add(b'2', None, tr, 2, node1, nullid)
-            node3 = f.add(b'3', None, tr, 3, node0, nullid)
-            node4 = f.add(b'4', None, tr, 4, node3, nullid)
-            node5 = f.add(b'5', None, tr, 5, node0, nullid)
+            node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+            node2 = f.add(b'2', None, tr, 2, node1, f.nullid)
+            node3 = f.add(b'3', None, tr, 3, node0, f.nullid)
+            node4 = f.add(b'4', None, tr, 4, node3, f.nullid)
+            node5 = f.add(b'5', None, tr, 5, node0, f.nullid)
 
         self.assertEqual(len(f), 6)
 
@@ -427,24 +426,24 @@
             with self.assertRaises(IndexError):
                 f.size(i)
 
-        self.assertEqual(f.revision(nullid), b'')
-        self.assertEqual(f.rawdata(nullid), b'')
+        self.assertEqual(f.revision(f.nullid), b'')
+        self.assertEqual(f.rawdata(f.nullid), b'')
 
         with self.assertRaises(error.LookupError):
             f.revision(b'\x01' * 20)
 
-        self.assertEqual(f.read(nullid), b'')
+        self.assertEqual(f.read(f.nullid), b'')
 
         with self.assertRaises(error.LookupError):
             f.read(b'\x01' * 20)
 
-        self.assertFalse(f.renamed(nullid))
+        self.assertFalse(f.renamed(f.nullid))
 
         with self.assertRaises(error.LookupError):
             f.read(b'\x01' * 20)
 
-        self.assertTrue(f.cmp(nullid, b''))
-        self.assertTrue(f.cmp(nullid, b'foo'))
+        self.assertTrue(f.cmp(f.nullid, b''))
+        self.assertTrue(f.cmp(f.nullid, b'foo'))
 
         with self.assertRaises(error.LookupError):
             f.cmp(b'\x01' * 20, b'irrelevant')
@@ -455,7 +454,7 @@
             next(gen)
 
         # Emitting null node yields nothing.
-        gen = f.emitrevisions([nullid])
+        gen = f.emitrevisions([f.nullid])
         with self.assertRaises(StopIteration):
             next(gen)
 
@@ -468,7 +467,7 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node = f.add(fulltext, None, tr, 0, nullid, nullid)
+            node = f.add(fulltext, None, tr, 0, f.nullid, f.nullid)
 
         self.assertEqual(f.storageinfo(), {})
         self.assertEqual(
@@ -496,10 +495,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
         self.assertIsNone(rev.delta)
@@ -512,10 +511,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext)
         self.assertIsNone(rev.delta)
@@ -534,9 +533,9 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
-            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+            node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
 
         self.assertEqual(f.storageinfo(), {})
         self.assertEqual(
@@ -596,10 +595,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext0)
         self.assertIsNone(rev.delta)
@@ -608,7 +607,7 @@
 
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
@@ -622,7 +621,7 @@
 
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
@@ -641,10 +640,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext0)
         self.assertIsNone(rev.delta)
@@ -653,7 +652,7 @@
 
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
@@ -667,7 +666,7 @@
 
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
@@ -700,16 +699,16 @@
         rev = next(gen)
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext2)
         self.assertIsNone(rev.delta)
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         # Delta behavior is storage dependent, so we can't easily test it.
 
         with self.assertRaises(StopIteration):
@@ -722,8 +721,8 @@
         rev = next(gen)
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext1)
         self.assertIsNone(rev.delta)
@@ -731,7 +730,7 @@
         rev = next(gen)
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
@@ -751,7 +750,7 @@
         rev = next(gen)
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
@@ -768,9 +767,9 @@
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
         self.assertEqual(
@@ -789,9 +788,9 @@
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
         self.assertEqual(
@@ -802,7 +801,7 @@
         rev = next(gen)
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertEqual(rev.basenode, node0)
 
         with self.assertRaises(StopIteration):
@@ -841,11 +840,11 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
-            node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, meta1, tr, 1, node0, f.nullid)
+            node2 = f.add(fulltext2, meta2, tr, 2, f.nullid, f.nullid)
 
-        # Metadata header isn't recognized when parent isn't nullid.
+        # Metadata header isn't recognized when parent isn't f.nullid.
         self.assertEqual(f.size(1), len(stored1))
         self.assertEqual(f.size(2), len(fulltext2))
 
@@ -886,8 +885,8 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
+            node0 = f.add(fulltext0, {}, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, meta1, tr, 1, f.nullid, f.nullid)
 
         # TODO this is buggy.
         self.assertEqual(f.size(0), len(fulltext0) + 4)
@@ -916,15 +915,15 @@
         fulltext1 = fulltext0 + b'bar\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         self.assertEqual(len(f), 2)
-        self.assertEqual(f.parents(node1), (node0, nullid))
+        self.assertEqual(f.parents(node1), (node0, f.nullid))
 
         # revision() raises since it performs hash verification.
         with self.assertRaises(error.StorageError):
@@ -951,11 +950,11 @@
         fulltext1 = fulltext0 + b'bar\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         with self.assertRaises(error.StorageError):
@@ -973,11 +972,11 @@
         fulltext1 = fulltext0 + b'bar\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         with self.assertRaises(error.StorageError):
@@ -994,22 +993,22 @@
         fulltext2 = fulltext1 + b'baz\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         with self.assertRaises(error.StorageError):
             f.read(node1)
 
-        node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
+        node2 = storageutil.hashrevisionsha1(fulltext2, node1, f.nullid)
 
         with self._maketransactionfn() as tr:
             delta = mdiff.textdiff(fulltext1, fulltext2)
             self._addrawrevisionfn(
-                f, tr, node2, node1, nullid, 2, delta=(1, delta)
+                f, tr, node2, node1, f.nullid, 2, delta=(1, delta)
             )
 
         self.assertEqual(len(f), 3)
@@ -1029,13 +1028,13 @@
         )
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
 
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, stored1, censored=True
+                f, tr, node1, node0, f.nullid, 1, stored1, censored=True
             )
 
         self.assertTrue(f.iscensored(1))
@@ -1063,13 +1062,13 @@
         )
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
 
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, stored1, censored=True
+                f, tr, node1, node0, f.nullid, 1, stored1, censored=True
             )
 
         with self.assertRaises(error.CensoredNodeError):
@@ -1088,10 +1087,10 @@
     def testaddnoop(self):
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
             # Varying by linkrev shouldn't impact hash.
-            node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
+            node2 = f.add(b'foo', None, tr, 1, f.nullid, f.nullid)
 
         self.assertEqual(node1, node0)
         self.assertEqual(node2, node0)
@@ -1102,7 +1101,9 @@
         with self._maketransactionfn() as tr:
             # Adding a revision with bad node value fails.
             with self.assertRaises(error.StorageError):
-                f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
+                f.addrevision(
+                    b'foo', tr, 0, f.nullid, f.nullid, node=b'\x01' * 20
+                )
 
     def testaddrevisionunknownflag(self):
         f = self._makefilefn()
@@ -1113,7 +1114,7 @@
                     break
 
             with self.assertRaises(error.StorageError):
-                f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
+                f.addrevision(b'foo', tr, 0, f.nullid, f.nullid, flags=flags)
 
     def testaddgroupsimple(self):
         f = self._makefilefn()
@@ -1153,12 +1154,12 @@
         delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
 
         f = self._makefilefn()
 
         deltas = [
-            (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
+            (node0, f.nullid, f.nullid, f.nullid, f.nullid, delta0, 0, {}),
         ]
 
         with self._maketransactionfn() as tr:
@@ -1207,7 +1208,7 @@
         nodes = []
         with self._maketransactionfn() as tr:
             for fulltext in fulltexts:
-                nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
+                nodes.append(f.add(fulltext, None, tr, 0, f.nullid, f.nullid))
 
         f = self._makefilefn()
         deltas = []
@@ -1215,7 +1216,7 @@
             delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
 
             deltas.append(
-                (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
+                (nodes[i], f.nullid, f.nullid, f.nullid, f.nullid, delta, 0, {})
             )
 
         with self._maketransactionfn() as tr:
@@ -1254,18 +1255,18 @@
         )
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
 
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, stored1, censored=True
+                f, tr, node1, node0, f.nullid, 1, stored1, censored=True
             )
 
         delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
         deltas = [
-            (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
+            (b'\xcc' * 20, node1, f.nullid, b'\x01' * 20, node1, delta, 0, {})
         ]
 
         with self._maketransactionfn() as tr:
@@ -1276,9 +1277,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
-            node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
-            node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
+            node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, f.nullid)
+            node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, f.nullid)
 
         with self._maketransactionfn() as tr:
             f.censorrevision(tr, node1)
@@ -1298,7 +1299,7 @@
 
         with self._maketransactionfn() as tr:
             for rev in range(10):
-                f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
+                f.add(b'%d' % rev, None, tr, rev, f.nullid, f.nullid)
 
         for rev in range(10):
             self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1308,10 +1309,10 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            p1 = nullid
+            p1 = f.nullid
 
             for rev in range(10):
-                f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+                f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
 
         for rev in range(10):
             self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1320,11 +1321,11 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'1', None, tr, 1, node0, nullid)
-            f.add(b'2', None, tr, 2, node1, nullid)
-            f.add(b'3', None, tr, 3, node0, nullid)
-            f.add(b'4', None, tr, 4, node0, nullid)
+            node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+            f.add(b'2', None, tr, 2, node1, f.nullid)
+            f.add(b'3', None, tr, 3, node0, f.nullid)
+            f.add(b'4', None, tr, 4, node0, f.nullid)
 
         for rev in range(5):
             self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1333,9 +1334,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
-            f.add(b'1', None, tr, 10, node0, nullid)
-            f.add(b'2', None, tr, 5, node0, nullid)
+            node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            f.add(b'1', None, tr, 10, node0, f.nullid)
+            f.add(b'2', None, tr, 5, node0, f.nullid)
 
         self.assertEqual(f.getstrippoint(0), (0, set()))
         self.assertEqual(f.getstrippoint(1), (1, set()))
@@ -1362,9 +1363,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            p1 = nullid
+            p1 = f.nullid
             for rev in range(10):
-                p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+                p1 = f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
 
         self.assertEqual(len(f), 10)
 
@@ -1377,9 +1378,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            f.add(b'0', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'1', None, tr, 5, nullid, nullid)
-            node2 = f.add(b'2', None, tr, 10, nullid, nullid)
+            f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'1', None, tr, 5, f.nullid, f.nullid)
+            node2 = f.add(b'2', None, tr, 10, f.nullid, f.nullid)
 
         self.assertEqual(len(f), 3)
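
Note: the mechanical `nullid` -> `f.nullid` churn above is part of moving node
constants onto the storage objects themselves, so stores with a different hash
length can supply their own constants. A minimal sketch of the invariant the
SHA-1 tests rely on (imports from `mercurial.node`):

    from mercurial.node import sha1nodeconstants

    # for SHA-1 backed stores, f.nullid is the 20-byte null node
    assert sha1nodeconstants.nullid == b'\x00' * 20
    assert len(sha1nodeconstants.nullid) == sha1nodeconstants.nodelen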
 
--- a/mercurial/treediscovery.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/treediscovery.py	Mon May 17 15:05:24 2021 +0200
@@ -10,10 +10,7 @@
 import collections
 
 from .i18n import _
-from .node import (
-    nullid,
-    short,
-)
+from .node import short
 from . import (
     error,
     pycompat,
@@ -44,11 +41,11 @@
     if audit is not None:
         audit[b'total-roundtrips'] = 1
 
-    if repo.changelog.tip() == nullid:
-        base.add(nullid)
-        if heads != [nullid]:
-            return [nullid], [nullid], list(heads)
-        return [nullid], [], heads
+    if repo.changelog.tip() == repo.nullid:
+        base.add(repo.nullid)
+        if heads != [repo.nullid]:
+            return [repo.nullid], [repo.nullid], list(heads)
+        return [repo.nullid], [], heads
 
     # assume we're closer to the tip than the root
     # and start by examining the heads
@@ -84,7 +81,7 @@
                 continue
 
             repo.ui.debug(b"examining %s:%s\n" % (short(n[0]), short(n[1])))
-            if n[0] == nullid:  # found the end of the branch
+            if n[0] == repo.nullid:  # found the end of the branch
                 pass
             elif n in seenbranch:
                 repo.ui.debug(b"branch already found\n")
@@ -170,7 +167,7 @@
             raise error.RepoError(_(b"already have changeset ") + short(f[:4]))
 
     base = list(base)
-    if base == [nullid]:
+    if base == [repo.nullid]:
         if force:
             repo.ui.warn(_(b"warning: repository is unrelated\n"))
         else:
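
Note: the rewritten guard covers discovery against an empty local repository.
A hedged illustration of the return contract (`remote` and its heads are
assumptions, not part of this patch):

    # hypothetical: empty local repo, remote advertises at least one head
    base, fetch, heads = treediscovery.findcommonincoming(repo, remote)
    # base == [repo.nullid], and fetch == [repo.nullid] whenever the remote
    # heads differ from [repo.nullid], i.e. everything is missing locally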
--- a/mercurial/ui.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/ui.py	Mon May 17 15:05:24 2021 +0200
@@ -886,10 +886,10 @@
         """
         # default is not always a list
         v = self.configwith(
-            config.parselist, section, name, default, b'list', untrusted
+            stringutil.parselist, section, name, default, b'list', untrusted
         )
         if isinstance(v, bytes):
-            return config.parselist(v)
+            return stringutil.parselist(v)
         elif v is None:
             return []
         return v
@@ -1057,6 +1057,8 @@
 
         This method exists as `getpath` needs a ui for potential warning messages.
         """
+        msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
+        self.deprecwarn(msg, b'6.0')
         return self.paths.getpath(self, *args, **kwargs)
 
     @property
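
Note: the `configlist` hunk swaps the parser's home (`config.parselist` ->
`stringutil.parselist`) without intending any behavior change. A sketch of the
expected result, assuming `ui` carries a config containing
`allow = alice, bob "carol d"` in some section:

    ui.configlist(b'section', b'allow')
    # => [b'alice', b'bob', b'carol d']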
--- a/mercurial/unionrepo.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/unionrepo.py	Mon May 17 15:05:24 2021 +0200
@@ -33,7 +33,7 @@
 
 
 class unionrevlog(revlog.revlog):
-    def __init__(self, opener, indexfile, revlog2, linkmapper):
+    def __init__(self, opener, radix, revlog2, linkmapper):
         # How it works:
         # To retrieve a revision, we just need to know the node id so we can
         # look it up in revlog2.
@@ -41,7 +41,11 @@
         # To differentiate a rev in the second revlog from a rev in the revlog,
         # we check revision against repotiprev.
         opener = vfsmod.readonlyvfs(opener)
-        revlog.revlog.__init__(self, opener, indexfile)
+        target = getattr(revlog2, 'target', None)
+        if target is None:
+            # a revlog wrapper, e.g. the manifestlog, which is not an actual revlog
+            target = revlog2._revlog.target
+        revlog.revlog.__init__(self, opener, target=target, radix=radix)
         self.revlog2 = revlog2
 
         n = len(self)
@@ -50,7 +54,18 @@
         for rev2 in self.revlog2:
             rev = self.revlog2.index[rev2]
             # rev numbers - in revlog2, very different from self.rev
-            _start, _csize, rsize, base, linkrev, p1rev, p2rev, node = rev
+            (
+                _start,
+                _csize,
+                rsize,
+                base,
+                linkrev,
+                p1rev,
+                p2rev,
+                node,
+                _sdo,
+                _sds,
+            ) = rev
             flags = _start & 0xFFFF
 
             if linkmapper is None:  # link is to same revlog
@@ -82,6 +97,8 @@
                 self.rev(p1node),
                 self.rev(p2node),
                 node,
+                0,  # sidedata offset
+                0,  # sidedata size
             )
             self.index.append(e)
             self.bundlerevs.add(n)
@@ -147,9 +164,7 @@
         changelog.changelog.__init__(self, opener)
         linkmapper = None
         changelog2 = changelog.changelog(opener2)
-        unionrevlog.__init__(
-            self, opener, self.indexfile, changelog2, linkmapper
-        )
+        unionrevlog.__init__(self, opener, self.radix, changelog2, linkmapper)
 
 
 class unionmanifest(unionrevlog, manifest.manifestrevlog):
@@ -157,7 +172,7 @@
         manifest.manifestrevlog.__init__(self, nodeconstants, opener)
         manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
         unionrevlog.__init__(
-            self, opener, self.indexfile, manifest2, linkmapper
+            self, opener, self._revlog.radix, manifest2, linkmapper
         )
 
 
@@ -166,7 +181,7 @@
         filelog.filelog.__init__(self, opener, path)
         filelog2 = filelog.filelog(opener2, path)
         self._revlog = unionrevlog(
-            opener, self.indexfile, filelog2._revlog, linkmapper
+            opener, self._revlog.radix, filelog2._revlog, linkmapper
         )
         self._repo = repo
         self.repotiprev = self._revlog.repotiprev
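
Note: the widened tuple unpacking tracks the revlog index format, which grew
two sidedata fields. A runnable sketch of the 10-field entry `unionrevlog` now
consumes (values are dummies; field meanings inferred from the unpacking
above):

    entry = (
        0,             # offset/flags: data offset << 16 | low 16 flag bits
        12,            # compressed size
        24,            # uncompressed size
        0,             # delta base revision
        0,             # link revision
        -1,            # p1 revision (-1 is the null revision)
        -1,            # p2 revision
        b'\xaa' * 20,  # binary node id
        0,             # sidedata offset (new)
        0,             # sidedata size (new)
    )
    _start, _csize, rsize, base, linkrev, p1rev, p2rev, node, _sdo, _sds = entry
    assert (p1rev, p2rev) == (-1, -1)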
--- a/mercurial/upgrade_utils/actions.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/upgrade_utils/actions.py	Mon May 17 15:05:24 2021 +0200
@@ -30,6 +30,7 @@
 RECLONES_REQUIREMENTS = {
     requirements.GENERALDELTA_REQUIREMENT,
     requirements.SPARSEREVLOG_REQUIREMENT,
+    requirements.REVLOGV2_REQUIREMENT,
 }
 
 
@@ -935,7 +936,6 @@
     """
     supported = {
         requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
@@ -966,7 +966,6 @@
         requirements.REVLOGV1_REQUIREMENT,  # allowed in case of downgrade
         requirements.STORE_REQUIREMENT,
         requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
@@ -996,7 +995,6 @@
         requirements.FNCACHE_REQUIREMENT,
         requirements.GENERALDELTA_REQUIREMENT,
         requirements.SPARSEREVLOG_REQUIREMENT,
-        requirements.SIDEDATA_REQUIREMENT,
         requirements.COPIESSDC_REQUIREMENT,
         requirements.NODEMAP_REQUIREMENT,
         requirements.SHARESAFE_REQUIREMENT,
--- a/mercurial/upgrade_utils/engine.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/upgrade_utils/engine.py	Mon May 17 15:05:24 2021 +0200
@@ -19,13 +19,32 @@
     metadata,
     pycompat,
     requirements,
-    revlog,
     scmutil,
     store,
     util,
     vfs as vfsmod,
 )
-from ..revlogutils import nodemap
+from ..revlogutils import (
+    constants as revlogconst,
+    flagutil,
+    nodemap,
+    sidedata as sidedatamod,
+)
+
+
+def get_sidedata_helpers(srcrepo, dstrepo):
+    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+    sequential = pycompat.iswindows or not use_w
+    if not sequential:
+        srcrepo.register_sidedata_computer(
+            revlogconst.KIND_CHANGELOG,
+            sidedatamod.SD_FILES,
+            (sidedatamod.SD_FILES,),
+            metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
+            flagutil.REVIDX_HASCOPIESINFO,
+            replace=True,
+        )
+    return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
 
 
 def _revlogfrompath(repo, rl_type, path):
@@ -61,16 +80,16 @@
 
     oldvfs = oldrl.opener
     newvfs = newrl.opener
-    oldindex = oldvfs.join(oldrl.indexfile)
-    newindex = newvfs.join(newrl.indexfile)
-    olddata = oldvfs.join(oldrl.datafile)
-    newdata = newvfs.join(newrl.datafile)
+    oldindex = oldvfs.join(oldrl._indexfile)
+    newindex = newvfs.join(newrl._indexfile)
+    olddata = oldvfs.join(oldrl._datafile)
+    newdata = newvfs.join(newrl._datafile)
 
-    with newvfs(newrl.indexfile, b'w'):
+    with newvfs(newrl._indexfile, b'w'):
         pass  # create all the directories
 
     util.copyfile(oldindex, newindex)
-    copydata = oldrl.opener.exists(oldrl.datafile)
+    copydata = oldrl.opener.exists(oldrl._datafile)
     if copydata:
         util.copyfile(olddata, newdata)
 
@@ -89,25 +108,6 @@
 )
 
 
-def getsidedatacompanion(srcrepo, dstrepo):
-    sidedatacompanion = None
-    removedreqs = srcrepo.requirements - dstrepo.requirements
-    addedreqs = dstrepo.requirements - srcrepo.requirements
-    if requirements.SIDEDATA_REQUIREMENT in removedreqs:
-
-        def sidedatacompanion(rl, rev):
-            rl = getattr(rl, '_revlog', rl)
-            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
-                return True, (), {}, 0, 0
-            return False, (), {}, 0, 0
-
-    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
-        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
-    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
-        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
-    return sidedatacompanion
-
-
 def matchrevlog(revlogfilter, rl_type):
     """check if a revlog is selected for cloning.
 
@@ -131,7 +131,7 @@
     rl_type,
     unencoded,
     upgrade_op,
-    sidedatacompanion,
+    sidedata_helpers,
     oncopiedrevision,
 ):
     """returns the new revlog object created"""
@@ -147,7 +147,7 @@
             addrevisioncb=oncopiedrevision,
             deltareuse=upgrade_op.delta_reuse_mode,
             forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
-            sidedatacompanion=sidedatacompanion,
+            sidedata_helpers=sidedata_helpers,
         )
     else:
         msg = _(b'blindly copying %s containing %i revisions\n')
@@ -257,7 +257,7 @@
     def oncopiedrevision(rl, rev, node):
         progress.increment()
 
-    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
+    sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)
 
     # Migrating filelogs
     ui.status(
@@ -282,7 +282,7 @@
             rl_type,
             unencoded,
             upgrade_op,
-            sidedatacompanion,
+            sidedata_helpers,
             oncopiedrevision,
         )
         info = newrl.storageinfo(storedsize=True)
@@ -322,7 +322,7 @@
             rl_type,
             unencoded,
             upgrade_op,
-            sidedatacompanion,
+            sidedata_helpers,
             oncopiedrevision,
         )
         info = newrl.storageinfo(storedsize=True)
@@ -361,7 +361,7 @@
             rl_type,
             unencoded,
             upgrade_op,
-            sidedatacompanion,
+            sidedata_helpers,
             oncopiedrevision,
         )
         info = newrl.storageinfo(storedsize=True)
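
Note: `get_sidedata_helpers` replaces the old `sidedatacompanion` callback
with registered computers. A sketch of the computer interface assumed by
`register_sidedata_computer`, inferred from how `run_sidedata_helpers`
consumes its result elsewhere in this series (body and flag values are
illustrative only):

    def example_computer(repo, store, rev, sidedata):
        # derive/extend the sidedata dict for `rev`...
        new_sidedata = dict(sidedata)
        # ...and report revlog flags to add and to remove on that revision
        flags_to_add = 0
        flags_to_remove = 0
        return new_sidedata, (flags_to_add, flags_to_remove)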
--- a/mercurial/util.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/util.py	Mon May 17 15:05:24 2021 +0200
@@ -34,6 +34,7 @@
 import traceback
 import warnings
 
+from .node import hex
 from .thirdparty import attr
 from .pycompat import (
     delattr,
--- a/mercurial/utils/storageutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/utils/storageutil.py	Mon May 17 15:05:24 2021 +0200
@@ -13,8 +13,8 @@
 from ..i18n import _
 from ..node import (
     bin,
-    nullid,
     nullrev,
+    sha1nodeconstants,
 )
 from .. import (
     dagop,
@@ -26,7 +26,11 @@
 from ..revlogutils import sidedata as sidedatamod
 from ..utils import hashutil
 
-_nullhash = hashutil.sha1(nullid)
+_nullhash = hashutil.sha1(sha1nodeconstants.nullid)
+
+# Revision data contains extra metadata not part of the official digest.
+# Only used in changegroup >= v4.
+CG_FLAG_SIDEDATA = 1
 
 
 def hashrevisionsha1(text, p1, p2):
@@ -37,7 +41,7 @@
     content in the revision graph.
     """
     # As of now, if one of the parent nodes is null, p2 is null
-    if p2 == nullid:
+    if p2 == sha1nodeconstants.nullid:
         # deep copy of a hash is faster than creating one
         s = _nullhash.copy()
         s.update(p1)
@@ -107,7 +111,7 @@
     Returns ``False`` if the file has no copy metadata. Otherwise a
     2-tuple of the source filename and node.
     """
-    if store.parents(node)[0] != nullid:
+    if store.parents(node)[0] != sha1nodeconstants.nullid:
         return False
 
     meta = parsemeta(store.revision(node))[0]
@@ -360,19 +364,7 @@
     ``assumehaveparentrevisions``
     ``sidedata_helpers`` (optional)
         If not None, means that sidedata should be included.
-        A dictionary of revlog type to tuples of `(repo, computers, removers)`:
-            * `repo` is used as an argument for computers
-            * `computers` is a list of `(category, (keys, computer)` that
-               compute the missing sidedata categories that were asked:
-               * `category` is the sidedata category
-               * `keys` are the sidedata keys to be affected
-               * `computer` is the function `(repo, store, rev, sidedata)` that
-                 returns a new sidedata dict.
-            * `removers` will remove the keys corresponding to the categories
-              that are present, but not needed.
-        If both `computers` and `removers` are empty, sidedata are simply not
-        transformed.
-        Revlog types are `changelog`, `manifest` or `filelog`.
+        See `revlogutils.sidedata.get_sidedata_helpers`.
     """
 
     fnode = store.node
@@ -486,51 +478,43 @@
 
                 available.add(rev)
 
-        sidedata = None
+        serialized_sidedata = None
+        sidedata_flags = (0, 0)
         if sidedata_helpers:
-            sidedata = store.sidedata(rev)
-            sidedata = run_sidedata_helpers(
+            old_sidedata = store.sidedata(rev)
+            sidedata, sidedata_flags = sidedatamod.run_sidedata_helpers(
                 store=store,
                 sidedata_helpers=sidedata_helpers,
-                sidedata=sidedata,
+                sidedata=old_sidedata,
                 rev=rev,
             )
-            sidedata = sidedatamod.serialize_sidedata(sidedata)
+            if sidedata:
+                serialized_sidedata = sidedatamod.serialize_sidedata(sidedata)
+
+        flags = flagsfn(rev) if flagsfn else 0
+        protocol_flags = 0
+        if serialized_sidedata:
+            # Advertise that sidedata exists to the other side
+            protocol_flags |= CG_FLAG_SIDEDATA
+            # Computers and removers can return flags to add and/or remove
+            flags = flags | sidedata_flags[0] & ~sidedata_flags[1]
 
         yield resultcls(
             node=node,
             p1node=fnode(p1rev),
             p2node=fnode(p2rev),
             basenode=fnode(baserev),
-            flags=flagsfn(rev) if flagsfn else 0,
+            flags=flags,
             baserevisionsize=baserevisionsize,
             revision=revision,
             delta=delta,
-            sidedata=sidedata,
+            sidedata=serialized_sidedata,
+            protocol_flags=protocol_flags,
         )
 
         prevrev = rev
 
 
-def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
-    """Returns the sidedata for the given revision after running through
-    the given helpers.
-    - `store`: the revlog this applies to (changelog, manifest, or filelog
-      instance)
-    - `sidedata_helpers`: see `storageutil.emitrevisions`
-    - `sidedata`: previous sidedata at the given rev, if any
-    - `rev`: affected rev of `store`
-    """
-    repo, sd_computers, sd_removers = sidedata_helpers
-    kind = store.revlog_kind
-    for _keys, sd_computer in sd_computers.get(kind, []):
-        sidedata = sd_computer(repo, store, rev, sidedata)
-    for keys, _computer in sd_removers.get(kind, []):
-        for key in keys:
-            sidedata.pop(key, None)
-    return sidedata
-
-
 def deltaiscensored(delta, baserev, baselenfn):
     """Determine if a delta represents censored revision data.
 
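Note: two details above deserve spelling out. `hashrevisionsha1`'s null-parent
branch is only a fast path over the general rule (SHA-1 of the sorted parents
followed by the text), and the flag merge leans on Python precedence, `&`
binding tighter than `|`. A runnable sketch of both:

    import hashlib

    def node_sha1(text, p1, p2):
        # equivalent to hashrevisionsha1 without the cached-null fast path
        a, b = sorted((p1, p2))
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()

    nullid = b'\x00' * 20
    assert node_sha1(b'x', b'\x11' * 20, nullid) == \
        node_sha1(b'x', nullid, b'\x11' * 20)

    # flag merge: removals only mask the additions before they are OR-ed in
    flags, to_add, to_remove = 0b0101, 0b0010, 0b0001
    assert flags | to_add & ~to_remove == flags | (to_add & ~to_remove)
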
--- a/mercurial/utils/stringutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/utils/stringutil.py	Mon May 17 15:05:24 2021 +0200
@@ -868,6 +868,96 @@
     return _booleans.get(s.lower(), None)
 
 
+def parselist(value):
+    """parse a configuration value as a list of comma/space separated strings
+
+    >>> parselist(b'this,is "a small" ,test')
+    ['this', 'is', 'a small', 'test']
+    """
+
+    def _parse_plain(parts, s, offset):
+        whitespace = False
+        while offset < len(s) and (
+            s[offset : offset + 1].isspace() or s[offset : offset + 1] == b','
+        ):
+            whitespace = True
+            offset += 1
+        if offset >= len(s):
+            return None, parts, offset
+        if whitespace:
+            parts.append(b'')
+        if s[offset : offset + 1] == b'"' and not parts[-1]:
+            return _parse_quote, parts, offset + 1
+        elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\':
+            parts[-1] = parts[-1][:-1] + s[offset : offset + 1]
+            return _parse_plain, parts, offset + 1
+        parts[-1] += s[offset : offset + 1]
+        return _parse_plain, parts, offset + 1
+
+    def _parse_quote(parts, s, offset):
+        if offset < len(s) and s[offset : offset + 1] == b'"':  # ""
+            parts.append(b'')
+            offset += 1
+            while offset < len(s) and (
+                s[offset : offset + 1].isspace()
+                or s[offset : offset + 1] == b','
+            ):
+                offset += 1
+            return _parse_plain, parts, offset
+
+        while offset < len(s) and s[offset : offset + 1] != b'"':
+            if (
+                s[offset : offset + 1] == b'\\'
+                and offset + 1 < len(s)
+                and s[offset + 1 : offset + 2] == b'"'
+            ):
+                offset += 1
+                parts[-1] += b'"'
+            else:
+                parts[-1] += s[offset : offset + 1]
+            offset += 1
+
+        if offset >= len(s):
+            real_parts = _configlist(parts[-1])
+            if not real_parts:
+                parts[-1] = b'"'
+            else:
+                real_parts[0] = b'"' + real_parts[0]
+                parts = parts[:-1]
+                parts.extend(real_parts)
+            return None, parts, offset
+
+        offset += 1
+        while offset < len(s) and s[offset : offset + 1] in [b' ', b',']:
+            offset += 1
+
+        if offset < len(s):
+            if offset + 1 == len(s) and s[offset : offset + 1] == b'"':
+                parts[-1] += b'"'
+                offset += 1
+            else:
+                parts.append(b'')
+        else:
+            return None, parts, offset
+
+        return _parse_plain, parts, offset
+
+    def _configlist(s):
+        s = s.rstrip(b' ,')
+        if not s:
+            return []
+        parser, parts, offset = _parse_plain, [b''], 0
+        while parser:
+            parser, parts, offset = parser(parts, s, offset)
+        return parts
+
+    if value is not None and isinstance(value, bytes):
+        result = _configlist(value.lstrip(b' ,\n'))
+    else:
+        result = value
+    return result or []
+
+
 def evalpythonliteral(s):
     """Evaluate a string containing a Python literal expression"""
     # We could backport our tokenizer hack to rewrite '' to u'' if we want
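
Note: `parselist` moves here verbatim so the Rust port (see the `config.rs`
hunk below) has a single reference implementation. A few expected outputs of
the state machine above, stated as assumptions rather than doctests:

    from mercurial.utils.stringutil import parselist

    assert parselist(b'a,b , c') == [b'a', b'b', b'c']
    assert parselist(b'"kept together" then,split') == \
        [b'kept together', b'then', b'split']
    assert parselist(b'') == []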
--- a/mercurial/utils/urlutil.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/utils/urlutil.py	Mon May 17 15:05:24 2021 +0200
@@ -445,13 +445,41 @@
     return bytes(u)
 
 
+def list_paths(ui, target_path=None):
+    """list all the (name, paths) in the passed ui"""
+    result = []
+    if target_path is None:
+        for name, paths in sorted(pycompat.iteritems(ui.paths)):
+            for p in paths:
+                result.append((name, p))
+
+    else:
+        for path in ui.paths.get(target_path, []):
+            result.append((target_path, path))
+    return result
+
+
+def try_path(ui, url):
+    """try to build a path from a url
+
+    Return None if no path could be built.
+    """
+    try:
+        # we pass the ui instance as warnings might need to be issued
+        return path(ui, None, rawloc=url)
+    except ValueError:
+        return None
+
+
 def get_push_paths(repo, ui, dests):
     """yields all the `path` selected as push destination by `dests`"""
     if not dests:
         if b'default-push' in ui.paths:
-            yield ui.paths[b'default-push']
+            for p in ui.paths[b'default-push']:
+                yield p
         elif b'default' in ui.paths:
-            yield ui.paths[b'default']
+            for p in ui.paths[b'default']:
+                yield p
         else:
             raise error.ConfigError(
                 _(b'default repository not configured!'),
@@ -459,7 +487,16 @@
             )
     else:
         for dest in dests:
-            yield ui.getpath(dest)
+            if dest in ui.paths:
+                for p in ui.paths[dest]:
+                    yield p
+            else:
+                path = try_path(ui, dest)
+                if path is None:
+                    msg = _(b'repository %s does not exist')
+                    msg %= dest
+                    raise error.RepoError(msg)
+                yield path
 
 
 def get_pull_paths(repo, ui, sources, default_branches=()):
@@ -468,15 +505,16 @@
         sources = [b'default']
     for source in sources:
         if source in ui.paths:
-            url = ui.paths[source].rawloc
+            for p in ui.paths[source]:
+                yield parseurl(p.rawloc, default_branches)
         else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                url = path(ui, None, rawloc=source).rawloc
-            except ValueError:
+            path = try_path(ui, source)
+            if path is not None:
+                url = path.rawloc
+            else:
                 url = source
-        yield parseurl(url, default_branches)
+            yield parseurl(url, default_branches)
 
 
 def get_unique_push_path(action, repo, ui, dest=None):
@@ -494,7 +532,14 @@
     else:
         dests = [dest]
     dests = list(get_push_paths(repo, ui, dests))
-    assert len(dests) == 1
+    if len(dests) != 1:
+        if dest is None:
+            msg = _("default path points to %d urls while %s only supports one")
+            msg %= (len(dests), action)
+        else:
+            msg = _("path points to %d urls while %s only supports one: %s")
+            msg %= (len(dests), action, dest)
+        raise error.Abort(msg)
     return dests[0]
 
 
@@ -508,45 +553,66 @@
 
     The `action` parameter will be used for the error message.
     """
+    urls = []
     if source is None:
         if b'default' in ui.paths:
-            url = ui.paths[b'default'].rawloc
+            urls.extend(p.rawloc for p in ui.paths[b'default'])
         else:
             # XXX this is the historical default behavior, but that is not
             # great, consider breaking BC on this.
-            url = b'default'
+            urls.append(b'default')
     else:
         if source in ui.paths:
-            url = ui.paths[source].rawloc
+            urls.extend(p.rawloc for p in ui.paths[source])
         else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                url = path(ui, None, rawloc=source).rawloc
-            except ValueError:
-                url = source
-    return parseurl(url, default_branches)
+            path = try_path(ui, source)
+            if path is not None:
+                urls.append(path.rawloc)
+            else:
+                urls.append(source)
+    if len(urls) != 1:
+        if source is None:
+            msg = _("default path points to %d urls while %s only supports one")
+            msg %= (len(urls), action)
+        else:
+            msg = _("path points to %d urls while %s only supports one: %s")
+            msg %= (len(urls), action, source)
+        raise error.Abort(msg)
+    return parseurl(urls[0], default_branches)
 
 
 def get_clone_path(ui, source, default_branches=()):
     """return the `(origsource, path, branch)` selected as clone source"""
+    urls = []
     if source is None:
         if b'default' in ui.paths:
-            url = ui.paths[b'default'].rawloc
+            urls.extend(p.rawloc for p in ui.paths[b'default'])
         else:
             # XXX this is the historical default behavior, but that is not
             # great, consider breaking BC on this.
-            url = b'default'
+            urls.append(b'default')
     else:
         if source in ui.paths:
-            url = ui.paths[source].rawloc
+            urls.extend(p.rawloc for p in ui.paths[source])
         else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                url = path(ui, None, rawloc=source).rawloc
-            except ValueError:
-                url = source
+            path = try_path(ui, source)
+            if path is not None:
+                urls.append(path.rawloc)
+            else:
+                urls.append(source)
+    if len(urls) != 1:
+        if source is None:
+            msg = _(
+                "default path points to %d urls while only one is supported"
+            )
+            msg %= len(urls)
+        else:
+            msg = _("path points to %d urls while only one is supported: %s")
+            msg %= (len(urls), source)
+        raise error.Abort(msg)
+    url = urls[0]
     clone_path, branch = parseurl(url, default_branches)
     return url, clone_path, branch
 
@@ -576,10 +642,13 @@
             if not loc:
                 continue
             loc, sub_opts = ui.configsuboptions(b'paths', name)
-            self[name] = path(ui, name, rawloc=loc, suboptions=sub_opts)
+            self[name] = [path(ui, name, rawloc=loc, suboptions=sub_opts)]
 
-        for name, p in sorted(self.items()):
-            p.chain_path(ui, self)
+        for name, old_paths in sorted(self.items()):
+            new_paths = []
+            for p in old_paths:
+                new_paths.extend(_chain_path(p, ui, self))
+            self[name] = new_paths
 
     def getpath(self, ui, name, default=None):
         """Return a ``path`` from a string, falling back to default.
@@ -590,6 +659,8 @@
         Returns None if ``name`` is not a registered path, a URI, or a local
         path to a repo.
         """
+        msg = b'getpath is deprecated, use `get_*` functions from urlutil'
+        ui.deprecwarn(msg, b'6.0')
         # Only fall back to default if no path was requested.
         if name is None:
             if not default:
@@ -598,7 +669,7 @@
                 default = (default,)
             for k in default:
                 try:
-                    return self[k]
+                    return self[k][0]
                 except KeyError:
                     continue
             return None
@@ -607,16 +678,14 @@
         # This may need to raise in the future.
         if not name:
             return None
-
-        try:
-            return self[name]
-        except KeyError:
+        if name in self:
+            return self[name][0]
+        else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                return path(ui, None, rawloc=name)
-            except ValueError:
+            path = try_path(ui, name)
+            if path is None:
                 raise error.RepoError(_(b'repository %s does not exist') % name)
+            return path.rawloc
 
 
 _pathsuboptions = {}
@@ -672,10 +741,43 @@
     return value
 
 
+def _chain_path(base_path, ui, paths):
+    """return the result of "path://" logic applied on a given path"""
+    new_paths = []
+    if base_path.url.scheme != b'path':
+        new_paths.append(base_path)
+    else:
+        assert base_path.url.path is None
+        sub_paths = paths.get(base_path.url.host)
+        if sub_paths is None:
+            m = _(b'cannot use `%s`, "%s" is not a known path')
+            m %= (base_path.rawloc, base_path.url.host)
+            raise error.Abort(m)
+        for subpath in sub_paths:
+            path = base_path.copy()
+            if subpath.raw_url.scheme == b'path':
+                m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
+                m %= (path.rawloc, path.url.host)
+                raise error.Abort(m)
+            path.url = subpath.url
+            path.rawloc = subpath.rawloc
+            path.loc = subpath.loc
+            if path.branch is None:
+                path.branch = subpath.branch
+            else:
+                base = path.rawloc.rsplit(b'#', 1)[0]
+                path.rawloc = b'%s#%s' % (base, path.branch)
+            suboptions = subpath._all_sub_opts.copy()
+            suboptions.update(path._own_sub_opts)
+            path._apply_suboptions(ui, suboptions)
+            new_paths.append(path)
+    return new_paths
+
+
 class path(object):
     """Represents an individual path and its configuration."""
 
-    def __init__(self, ui, name, rawloc=None, suboptions=None):
+    def __init__(self, ui=None, name=None, rawloc=None, suboptions=None):
         """Construct a path from its config options.
 
         ``ui`` is the ``ui`` instance the path is coming from.
@@ -687,6 +789,13 @@
         filesystem path with a .hg directory or b) a URL. If not,
         ``ValueError`` is raised.
         """
+        if ui is None:
+            # used in copy
+            assert name is None
+            assert rawloc is None
+            assert suboptions is None
+            return
+
         if not rawloc:
             raise ValueError(b'rawloc must be defined')
 
@@ -717,30 +826,15 @@
 
         self._apply_suboptions(ui, sub_opts)
 
-    def chain_path(self, ui, paths):
-        if self.url.scheme == b'path':
-            assert self.url.path is None
-            try:
-                subpath = paths[self.url.host]
-            except KeyError:
-                m = _(b'cannot use `%s`, "%s" is not a known path')
-                m %= (self.rawloc, self.url.host)
-                raise error.Abort(m)
-            if subpath.raw_url.scheme == b'path':
-                m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
-                m %= (self.rawloc, self.url.host)
-                raise error.Abort(m)
-            self.url = subpath.url
-            self.rawloc = subpath.rawloc
-            self.loc = subpath.loc
-            if self.branch is None:
-                self.branch = subpath.branch
-            else:
-                base = self.rawloc.rsplit(b'#', 1)[0]
-                self.rawloc = b'%s#%s' % (base, self.branch)
-            suboptions = subpath._all_sub_opts.copy()
-            suboptions.update(self._own_sub_opts)
-            self._apply_suboptions(ui, suboptions)
+    def copy(self):
+        """make a copy of this path object"""
+        new = self.__class__()
+        for k, v in self.__dict__.items():
+            new_copy = getattr(v, 'copy', None)
+            if new_copy is not None:
+                v = new_copy()
+            new.__dict__[k] = v
+        return new
 
     def _validate_path(self):
         # When given a raw location but not a symbolic name, validate the
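
Note: the structural change in this file is that `ui.paths[name]` now maps to
a *list* of `path` objects, with `path://` indirection resolved by
`_chain_path` once all names are loaded. A usage sketch, assuming `ui` is an
initialized `mercurial.ui.ui` whose config declares
`default = https://example.org/repo` and `mirror = path://default` under
`[paths]`:

    from mercurial.utils import urlutil

    for name, p in urlutil.list_paths(ui):   # flattened (name, path) pairs
        print(name, p.rawloc)
    # `mirror` resolves to https://example.org/repo and inherits `default`'s
    # sub-options unless it overrides them; get_unique_push_path() now
    # aborts instead of asserting when a name expands to several URLs.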
--- a/mercurial/verify.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/verify.py	Mon May 17 15:05:24 2021 +0200
@@ -10,13 +10,8 @@
 import os
 
 from .i18n import _
-from .node import (
-    nullid,
-    short,
-)
-from .utils import (
-    stringutil,
-)
+from .node import short
+from .utils import stringutil
 
 from . import (
     error,
@@ -56,7 +51,7 @@
         self.warnings = 0
         self.havecl = len(repo.changelog) > 0
         self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
-        self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
+        self.revlogv1 = repo.changelog._format_version != revlog.REVLOGV0
         self.lrugetctx = util.lrucachefunc(repo.unfiltered().__getitem__)
         self.refersmf = False
         self.fncachewarned = False
@@ -107,7 +102,7 @@
         if d[1]:
             self._err(None, _(b"index contains %d extra bytes") % d[1], name)
 
-        if obj.version != revlog.REVLOGV0:
+        if obj._format_version != revlog.REVLOGV0:
             if not self.revlogv1:
                 self._warn(_(b"warning: `%s' uses revlog format 1") % name)
         elif self.revlogv1:
@@ -159,13 +154,13 @@
 
         try:
             p1, p2 = obj.parents(node)
-            if p1 not in seen and p1 != nullid:
+            if p1 not in seen and p1 != self.repo.nullid:
                 self._err(
                     lr,
                     _(b"unknown parent 1 %s of %s") % (short(p1), short(node)),
                     f,
                 )
-            if p2 not in seen and p2 != nullid:
+            if p2 not in seen and p2 != self.repo.nullid:
                 self._err(
                     lr,
                     _(b"unknown parent 2 %s of %s") % (short(p2), short(node)),
@@ -267,7 +262,7 @@
 
             try:
                 changes = cl.read(n)
-                if changes[0] != nullid:
+                if changes[0] != self.repo.nullid:
                     mflinkrevs.setdefault(changes[0], []).append(i)
                     self.refersmf = True
                 for f in changes[3]:
@@ -331,7 +326,7 @@
         if self.refersmf:
             # Do not check manifest if there are only changelog entries with
             # null manifests.
-            self._checkrevlog(mf, label, 0)
+            self._checkrevlog(mf._revlog, label, 0)
         progress = ui.makeprogress(
             _(b'checking'), unit=_(b'manifests'), total=len(mf)
         )
@@ -488,7 +483,7 @@
 
         state = {
             # TODO this assumes revlog storage for changelog.
-            b'expectedversion': self.repo.changelog.version & 0xFFFF,
+            b'expectedversion': self.repo.changelog._format_version,
             b'skipflags': self.skipflags,
             # experimental config: censor.policy
             b'erroroncensored': ui.config(b'censor', b'policy') == b'abort',
@@ -598,7 +593,7 @@
                                 % (rp[0], short(rp[1])),
                                 f,
                             )
-                        elif rp[1] == nullid:
+                        elif rp[1] == self.repo.nullid:
                             ui.note(
                                 _(
                                     b"warning: %s@%s: copy source"
--- a/mercurial/vfs.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/vfs.py	Mon May 17 15:05:24 2021 +0200
@@ -307,7 +307,7 @@
         # multiple instances puts us at risk of running out of file descriptors
         # only allow to use backgroundfilecloser when in main thread.
         if not isinstance(
-            threading.currentThread(),
+            threading.current_thread(),
             threading._MainThread,  # pytype: disable=module-attr
         ):
             yield
@@ -483,7 +483,7 @@
             fp = checkambigatclosing(fp)
 
         if backgroundclose and isinstance(
-            threading.currentThread(),
+            threading.current_thread(),
             threading._MainThread,  # pytype: disable=module-attr
         ):
             if (
--- a/mercurial/wireprotov1server.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/wireprotov1server.py	Mon May 17 15:05:24 2021 +0200
@@ -11,10 +11,7 @@
 import os
 
 from .i18n import _
-from .node import (
-    hex,
-    nullid,
-)
+from .node import hex
 from .pycompat import getattr
 
 from . import (
@@ -470,7 +467,7 @@
         clheads = set(repo.changelog.heads())
         heads = set(opts.get(b'heads', set()))
         common = set(opts.get(b'common', set()))
-        common.discard(nullid)
+        common.discard(repo.nullid)
         if (
             repo.ui.configbool(b'server', b'pullbundle')
             and b'partial-pull' in proto.getprotocaps()
--- a/mercurial/wireprotov2server.py	Fri May 07 10:39:58 2021 +0200
+++ b/mercurial/wireprotov2server.py	Mon May 17 15:05:24 2021 +0200
@@ -10,10 +10,7 @@
 import contextlib
 
 from .i18n import _
-from .node import (
-    hex,
-    nullid,
-)
+from .node import hex
 from . import (
     discovery,
     encoding,
@@ -950,7 +947,7 @@
             if spec[b'roots']:
                 common = [n for n in spec[b'roots'] if clhasnode(n)]
             else:
-                common = [nullid]
+                common = [repo.nullid]
 
             for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
                 if n not in seen:
--- a/relnotes/next	Fri May 07 10:39:58 2021 +0200
+++ b/relnotes/next	Mon May 17 15:05:24 2021 +0200
@@ -1,5 +1,8 @@
 == New Features ==
- 
+
+ * `hg config` now has a `--source` option to show where each
+   configuration value comes from.
+
 
 == Default Format Change ==
 
--- a/rust/Cargo.lock	Fri May 07 10:39:58 2021 +0200
+++ b/rust/Cargo.lock	Mon May 17 15:05:24 2021 +0200
@@ -64,9 +64,9 @@
 
 [[package]]
 name = "bytes-cast"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3196ba300c7bc9282a4331e878496cb3e9603a898a8f1446601317163e16ca52"
+checksum = "0d434f9a4ecbe987e7ccfda7274b6f82ea52c9b63742565a65cb5e8ba0f2c452"
 dependencies = [
  "bytes-cast-derive",
 ]
@@ -358,6 +358,7 @@
  "format-bytes",
  "home",
  "im-rc",
+ "itertools",
  "lazy_static",
  "log",
  "memmap",
--- a/rust/hg-core/Cargo.toml	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/Cargo.toml	Mon May 17 15:05:24 2021 +0200
@@ -9,11 +9,12 @@
 name = "hg"
 
 [dependencies]
-bytes-cast = "0.1"
+bytes-cast = "0.2"
 byteorder = "1.3.4"
 derive_more = "0.99"
 home = "0.5"
 im-rc = "15.0.*"
+itertools = "0.9"
 lazy_static = "1.4.0"
 rand = "0.7.3"
 rand_pcg = "0.2.1"
--- a/rust/hg-core/src/config/config.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/config/config.rs	Mon May 17 15:05:24 2021 +0200
@@ -361,10 +361,11 @@
     ///
     /// This is appropriate for new configuration keys. The value syntax is
     /// **not** the same as most existing list-valued config, which has Python
-    /// parsing implemented in `parselist()` in `mercurial/config.py`.
-    /// Faithfully porting that parsing algorithm to Rust (including behavior
-    /// that are arguably bugs) turned out to be non-trivial and hasn’t been
-    /// completed as of this writing.
+    /// parsing implemented in `parselist()` in
+    /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing
+    /// algorithm to Rust (including behaviors that are arguably bugs)
+    /// turned out to be non-trivial and hasn’t been completed as of this
+    /// writing.
     ///
     /// Instead, the "simple" syntax is: split on comma, then trim leading and
     /// trailing whitespace of each component. Quotes or backslashes are not
--- a/rust/hg-core/src/dirstate.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/dirstate.rs	Mon May 17 15:05:24 2021 +0200
@@ -7,9 +7,9 @@
 
 use crate::errors::HgError;
 use crate::revlog::Node;
-use crate::{utils::hg_path::HgPathBuf, FastHashMap};
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::FastHashMap;
 use bytes_cast::{unaligned, BytesCast};
-use std::collections::hash_map;
 use std::convert::TryFrom;
 
 pub mod dirs_multiset;
@@ -35,6 +35,29 @@
     pub size: i32,
 }
 
+impl DirstateEntry {
+    pub fn is_non_normal(&self) -> bool {
+        self.state != EntryState::Normal || self.mtime == MTIME_UNSET
+    }
+
+    pub fn is_from_other_parent(&self) -> bool {
+        self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
+    }
+
+    // TODO: other platforms
+    #[cfg(unix)]
+    pub fn mode_changed(
+        &self,
+        filesystem_metadata: &std::fs::Metadata,
+    ) -> bool {
+        use std::os::unix::fs::MetadataExt;
+        const EXEC_BIT_MASK: u32 = 0o100;
+        let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
+        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
+        dirstate_exec_bit != fs_exec_bit
+    }
+}
+
 #[derive(BytesCast)]
 #[repr(C)]
 struct RawEntry {
@@ -45,16 +68,20 @@
     length: unaligned::I32Be,
 }
 
+const MTIME_UNSET: i32 = -1;
+
 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
 /// other parent. This allows revert to pick the right status back during a
 /// merge.
 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
 
 pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
-pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
+pub type StateMapIter<'a> =
+    Box<dyn Iterator<Item = (&'a HgPath, &'a DirstateEntry)> + Send + 'a>;
 
 pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
-pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
+pub type CopyMapIter<'a> =
+    Box<dyn Iterator<Item = (&'a HgPath, &'a HgPath)> + Send + 'a>;
 
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum EntryState {
@@ -65,6 +92,16 @@
     Unknown,
 }
 
+impl EntryState {
+    pub fn is_tracked(self) -> bool {
+        use EntryState::*;
+        match self {
+            Normal | Added | Merged => true,
+            Removed | Unknown => false,
+        }
+    }
+}
+
 impl TryFrom<u8> for EntryState {
     type Error = HgError;
 
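Note: the new `DirstateEntry` predicates encode long-standing dirstate
sentinels (mtime `-1` for "unset", size `-2` for "merged from the other
parent"). A Python rendering of the same checks for readers of the pure
implementation (function names hypothetical):

    MTIME_UNSET = -1
    SIZE_FROM_OTHER_PARENT = -2

    def is_non_normal(state, mtime):
        # needs a status re-check unless clean ('n') with a trusted mtime
        return state != b'n' or mtime == MTIME_UNSET

    def is_from_other_parent(state, size):
        # clean state but sentinel size: taken from p2 during a merge
        return state == b'n' and size == SIZE_FROM_OTHER_PARENT

    assert is_non_normal(b'a', 0) and not is_non_normal(b'n', 42)
    assert is_from_other_parent(b'n', -2)
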
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs	Mon May 17 15:05:24 2021 +0200
@@ -14,7 +14,7 @@
         files,
         hg_path::{HgPath, HgPathBuf, HgPathError},
     },
-    DirstateEntry, DirstateMapError, FastHashMap, StateMap,
+    DirstateEntry, DirstateMapError, FastHashMap,
 };
 use std::collections::{hash_map, hash_map::Entry, HashMap, HashSet};
 
@@ -30,17 +30,22 @@
     /// Initializes the multiset from a dirstate.
     ///
     /// If `skip_state` is provided, skips dirstate entries with equal state.
-    pub fn from_dirstate(
-        dirstate: &StateMap,
+    pub fn from_dirstate<'a, I, P>(
+        dirstate: I,
         skip_state: Option<EntryState>,
-    ) -> Result<Self, DirstateMapError> {
+    ) -> Result<Self, DirstateMapError>
+    where
+        I: IntoIterator<Item = (P, &'a DirstateEntry)>,
+        P: AsRef<HgPath>,
+    {
         let mut multiset = DirsMultiset {
             inner: FastHashMap::default(),
         };
-        for (filename, DirstateEntry { state, .. }) in dirstate.iter() {
+        for (filename, entry) in dirstate {
+            let filename = filename.as_ref();
             // This `if` is optimized out of the loop
             if let Some(skip) = skip_state {
-                if skip != *state {
+                if skip != entry.state {
                     multiset.add_path(filename)?;
                 }
             } else {
@@ -207,6 +212,7 @@
 #[cfg(test)]
 mod tests {
     use super::*;
+    use crate::StateMap;
 
     #[test]
     fn test_delete_path_path_not_found() {
@@ -356,7 +362,7 @@
         };
         assert_eq!(expected, new);
 
-        let input_map = ["b/x", "a/c", "a/d/x"]
+        let input_map: HashMap<_, _> = ["b/x", "a/c", "a/d/x"]
             .iter()
             .map(|f| {
                 (
@@ -384,7 +390,7 @@
 
     #[test]
     fn test_dirsmultiset_new_skip() {
-        let input_map = [
+        let input_map: HashMap<_, _> = [
             ("a/", EntryState::Normal),
             ("a/b", EntryState::Normal),
             ("a/c", EntryState::Removed),
--- a/rust/hg-core/src/dirstate/dirstate_map.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs	Mon May 17 15:05:24 2021 +0200
@@ -5,40 +5,28 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::errors::HgError;
-use crate::revlog::node::NULL_NODE;
+use crate::dirstate::parsers::clear_ambiguous_mtime;
+use crate::dirstate::parsers::Timestamp;
 use crate::{
-    dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
+    dirstate::EntryState,
     pack_dirstate, parse_dirstate,
-    utils::{
-        files::normalize_case,
-        hg_path::{HgPath, HgPathBuf},
-    },
+    utils::hg_path::{HgPath, HgPathBuf},
     CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
-    DirstateParents, FastHashMap, StateMap,
+    DirstateParents, StateMap,
 };
 use micro_timer::timed;
 use std::collections::HashSet;
-use std::convert::TryInto;
 use std::iter::FromIterator;
 use std::ops::Deref;
-use std::time::Duration;
-
-pub type FileFoldMap = FastHashMap<HgPathBuf, HgPathBuf>;
-
-const MTIME_UNSET: i32 = -1;
 
 #[derive(Default)]
 pub struct DirstateMap {
     state_map: StateMap,
     pub copy_map: CopyMap,
-    file_fold_map: Option<FileFoldMap>,
     pub dirs: Option<DirsMultiset>,
     pub all_dirs: Option<DirsMultiset>,
     non_normal_set: Option<HashSet<HgPathBuf>>,
     other_parent_set: Option<HashSet<HgPathBuf>>,
-    parents: Option<DirstateParents>,
-    dirty_parents: bool,
 }
 
 /// Should only really be used in python interface code, for clarity
@@ -69,13 +57,8 @@
     pub fn clear(&mut self) {
         self.state_map = StateMap::default();
         self.copy_map.clear();
-        self.file_fold_map = None;
         self.non_normal_set = None;
         self.other_parent_set = None;
-        self.set_parents(&DirstateParents {
-            p1: NULL_NODE,
-            p2: NULL_NODE,
-        })
     }
 
     /// Add a tracked file to the dirstate
@@ -98,13 +81,13 @@
         }
         self.state_map.insert(filename.to_owned(), entry.to_owned());
 
-        if entry.state != EntryState::Normal || entry.mtime == MTIME_UNSET {
+        if entry.is_non_normal() {
             self.get_non_normal_other_parent_entries()
                 .0
                 .insert(filename.to_owned());
         }
 
-        if entry.size == SIZE_FROM_OTHER_PARENT {
+        if entry.is_from_other_parent() {
             self.get_non_normal_other_parent_entries()
                 .1
                 .insert(filename.to_owned());
@@ -135,9 +118,6 @@
             }
         }
 
-        if let Some(ref mut file_fold_map) = self.file_fold_map {
-            file_fold_map.remove(&normalize_case(filename));
-        }
         self.state_map.insert(
             filename.to_owned(),
             DirstateEntry {
@@ -172,9 +152,6 @@
                 all_dirs.delete_path(filename)?;
             }
         }
-        if let Some(ref mut file_fold_map) = self.file_fold_map {
-            file_fold_map.remove(&normalize_case(filename));
-        }
         self.get_non_normal_other_parent_entries()
             .0
             .remove(filename);
@@ -188,32 +165,22 @@
         now: i32,
     ) {
         for filename in filenames {
-            let mut changed = false;
             if let Some(entry) = self.state_map.get_mut(&filename) {
-                if entry.state == EntryState::Normal && entry.mtime == now {
-                    changed = true;
-                    *entry = DirstateEntry {
-                        mtime: MTIME_UNSET,
-                        ..*entry
-                    };
+                if clear_ambiguous_mtime(entry, now) {
+                    self.get_non_normal_other_parent_entries()
+                        .0
+                        .insert(filename.to_owned());
                 }
             }
-            if changed {
-                self.get_non_normal_other_parent_entries()
-                    .0
-                    .insert(filename.to_owned());
-            }
         }
     }
 
-    pub fn non_normal_entries_remove(
-        &mut self,
-        key: impl AsRef<HgPath>,
-    ) -> bool {
+    pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
         self.get_non_normal_other_parent_entries()
             .0
-            .remove(key.as_ref())
+            .remove(key.as_ref());
     }
+
     pub fn non_normal_entries_union(
         &mut self,
         other: HashSet<HgPathBuf>,
@@ -264,18 +231,11 @@
         let mut non_normal = HashSet::new();
         let mut other_parent = HashSet::new();
 
-        for (
-            filename,
-            DirstateEntry {
-                state, size, mtime, ..
-            },
-        ) in self.state_map.iter()
-        {
-            if *state != EntryState::Normal || *mtime == MTIME_UNSET {
+        for (filename, entry) in self.state_map.iter() {
+            if entry.is_non_normal() {
                 non_normal.insert(filename.to_owned());
             }
-            if *state == EntryState::Normal && *size == SIZE_FROM_OTHER_PARENT
-            {
+            if entry.is_from_other_parent() {
                 other_parent.insert(filename.to_owned());
             }
         }
@@ -289,8 +249,10 @@
     /// good idea.
     pub fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
         if self.all_dirs.is_none() {
-            self.all_dirs =
-                Some(DirsMultiset::from_dirstate(&self.state_map, None)?);
+            self.all_dirs = Some(DirsMultiset::from_dirstate(
+                self.state_map.iter(),
+                None,
+            )?);
         }
         Ok(())
     }
@@ -298,7 +260,7 @@
     pub fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
         if self.dirs.is_none() {
             self.dirs = Some(DirsMultiset::from_dirstate(
-                &self.state_map,
+                self.state_map.iter(),
                 Some(EntryState::Removed),
             )?);
         }
@@ -321,46 +283,11 @@
         Ok(self.all_dirs.as_ref().unwrap().contains(directory))
     }
 
-    pub fn parents(
+    #[timed]
+    pub fn read(
         &mut self,
         file_contents: &[u8],
-    ) -> Result<&DirstateParents, DirstateError> {
-        if let Some(ref parents) = self.parents {
-            return Ok(parents);
-        }
-        let parents;
-        if file_contents.len() == PARENT_SIZE * 2 {
-            parents = DirstateParents {
-                p1: file_contents[..PARENT_SIZE].try_into().unwrap(),
-                p2: file_contents[PARENT_SIZE..PARENT_SIZE * 2]
-                    .try_into()
-                    .unwrap(),
-            };
-        } else if file_contents.is_empty() {
-            parents = DirstateParents {
-                p1: NULL_NODE,
-                p2: NULL_NODE,
-            };
-        } else {
-            return Err(
-                HgError::corrupted("Dirstate appears to be damaged").into()
-            );
-        }
-
-        self.parents = Some(parents);
-        Ok(self.parents.as_ref().unwrap())
-    }
-
-    pub fn set_parents(&mut self, parents: &DirstateParents) {
-        self.parents = Some(parents.clone());
-        self.dirty_parents = true;
-    }
-
-    #[timed]
-    pub fn read<'a>(
-        &mut self,
-        file_contents: &'a [u8],
-    ) -> Result<Option<&'a DirstateParents>, DirstateError> {
+    ) -> Result<Option<DirstateParents>, DirstateError> {
         if file_contents.is_empty() {
             return Ok(None);
         }
@@ -376,42 +303,20 @@
                 .into_iter()
                 .map(|(path, copy)| (path.to_owned(), copy.to_owned())),
         );
-
-        if !self.dirty_parents {
-            self.set_parents(&parents);
-        }
-
-        Ok(Some(parents))
+        Ok(Some(parents.clone()))
     }
 
     pub fn pack(
         &mut self,
         parents: DirstateParents,
-        now: Duration,
+        now: Timestamp,
     ) -> Result<Vec<u8>, DirstateError> {
         let packed =
             pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
 
-        self.dirty_parents = false;
-
         self.set_non_normal_other_parent_entries(true);
         Ok(packed)
     }
-    pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
-        if let Some(ref file_fold_map) = self.file_fold_map {
-            return file_fold_map;
-        }
-        let mut new_file_fold_map = FileFoldMap::default();
-
-        for (filename, DirstateEntry { state, .. }) in self.state_map.iter() {
-            if *state != EntryState::Removed {
-                new_file_fold_map
-                    .insert(normalize_case(&filename), filename.to_owned());
-            }
-        }
-        self.file_fold_map = Some(new_file_fold_map);
-        self.file_fold_map.as_ref().unwrap()
-    }
 }
 
 #[cfg(test)]
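The `is_non_normal()` and `is_from_other_parent()` calls above replace predicates that were previously written inline. A hedged reconstruction of what the helpers encapsulate, pieced together from the lines removed in this file (struct shapes simplified; the concrete value of `SIZE_FROM_OTHER_PARENT` is assumed for illustration):

#[derive(Clone, Copy, PartialEq)]
enum EntryState {
    Normal,
    Added,
    Removed,
}

#[derive(Clone, Copy)]
struct DirstateEntry {
    state: EntryState,
    size: i32,
    mtime: i32,
}

const MTIME_UNSET: i32 = -1;
const SIZE_FROM_OTHER_PARENT: i32 = -2; // assumed value, for illustration

impl DirstateEntry {
    // Mirrors the predicate formerly inlined in `add_file`.
    fn is_non_normal(&self) -> bool {
        self.state != EntryState::Normal || self.mtime == MTIME_UNSET
    }

    // Mirrors the predicate formerly inlined in
    // `set_non_normal_other_parent_entries`.
    fn is_from_other_parent(&self) -> bool {
        self.state == EntryState::Normal
            && self.size == SIZE_FROM_OTHER_PARENT
    }
}

fn main() {
    let entry = DirstateEntry { state: EntryState::Added, size: 0, mtime: 0 };
    assert!(entry.is_non_normal());
    assert!(!entry.is_from_other_parent());
}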
--- a/rust/hg-core/src/dirstate/parsers.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/dirstate/parsers.rs	Mon May 17 15:05:24 2021 +0200
@@ -13,7 +13,6 @@
 use bytes_cast::BytesCast;
 use micro_timer::timed;
 use std::convert::{TryFrom, TryInto};
-use std::time::Duration;
 
 /// Parents are stored in the dirstate as byte hashes.
 pub const PARENT_SIZE: usize = 20;
@@ -35,10 +34,23 @@
 }
 
 #[timed]
-pub fn parse_dirstate(mut contents: &[u8]) -> Result<ParseResult, HgError> {
+pub fn parse_dirstate(contents: &[u8]) -> Result<ParseResult, HgError> {
     let mut copies = Vec::new();
     let mut entries = Vec::new();
+    let parents =
+        parse_dirstate_entries(contents, |path, entry, copy_source| {
+            if let Some(source) = copy_source {
+                copies.push((path, source));
+            }
+            entries.push((path, *entry));
+        })?;
+    Ok((parents, entries, copies))
+}
 
+pub fn parse_dirstate_entries<'a>(
+    mut contents: &'a [u8],
+    mut each_entry: impl FnMut(&'a HgPath, &DirstateEntry, Option<&'a HgPath>),
+) -> Result<&'a DirstateParents, HgError> {
     let (parents, rest) = DirstateParents::from_bytes(contents)
         .map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
     contents = rest;
@@ -62,34 +74,92 @@
         let path = HgPath::new(
             iter.next().expect("splitn always yields at least one item"),
         );
-        if let Some(copy_source) = iter.next() {
-            copies.push((path, HgPath::new(copy_source)));
-        }
+        let copy_source = iter.next().map(HgPath::new);
+        each_entry(path, &entry, copy_source);
 
-        entries.push((path, entry));
         contents = rest;
     }
-    Ok((parents, entries, copies))
+    Ok(parents)
+}
+
+fn packed_filename_and_copy_source_size(
+    filename: &HgPath,
+    copy_source: Option<&HgPath>,
+) -> usize {
+    filename.len()
+        + if let Some(source) = copy_source {
+            b"\0".len() + source.len()
+        } else {
+            0
+        }
+}
+
+pub fn packed_entry_size(
+    filename: &HgPath,
+    copy_source: Option<&HgPath>,
+) -> usize {
+    MIN_ENTRY_SIZE
+        + packed_filename_and_copy_source_size(filename, copy_source)
 }
 
-/// `now` is the duration in seconds since the Unix epoch
+pub fn pack_entry(
+    filename: &HgPath,
+    entry: &DirstateEntry,
+    copy_source: Option<&HgPath>,
+    packed: &mut Vec<u8>,
+) {
+    let length = packed_filename_and_copy_source_size(filename, copy_source);
+
+    // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
+    packed.write_u8(entry.state.into()).unwrap();
+    packed.write_i32::<BigEndian>(entry.mode).unwrap();
+    packed.write_i32::<BigEndian>(entry.size).unwrap();
+    packed.write_i32::<BigEndian>(entry.mtime).unwrap();
+    packed.write_i32::<BigEndian>(length as i32).unwrap();
+    packed.extend(filename.as_bytes());
+    if let Some(source) = copy_source {
+        packed.push(b'\0');
+        packed.extend(source.as_bytes());
+    }
+}
+
+/// Seconds since the Unix epoch
+pub struct Timestamp(pub u64);
+
+pub fn clear_ambiguous_mtime(
+    entry: &mut DirstateEntry,
+    mtime_now: i32,
+) -> bool {
+    let ambiguous =
+        entry.state == EntryState::Normal && entry.mtime == mtime_now;
+    if ambiguous {
+        // The file was last modified "simultaneously" with the current
+        // write to dirstate (i.e. within the same second for file-
+        // systems with a granularity of 1 sec). This commonly happens
+        // for at least a couple of files on 'update'.
+        // The user could change the file without changing its size
+        // within the same second. Invalidate the file's mtime in
+        // dirstate, forcing future 'status' calls to compare the
+        // contents of the file if the size is the same. This prevents
+        // mistakenly treating such files as clean.
+        entry.mtime = -1;
+    }
+    ambiguous
+}
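Extracting `clear_ambiguous_mtime` makes the invalidation rule testable in isolation. A hedged sketch of its observable behavior, using the `DirstateEntry` fields as they appear elsewhere in this file (the test itself is illustrative, not from the patch):

#[test]
fn clear_ambiguous_mtime_invalidates_same_second_writes() {
    let mut entry = DirstateEntry {
        state: EntryState::Normal,
        mode: 0o644,
        size: 10,
        mtime: 100,
    };
    // Written within the same second as `now`: ambiguous, so the mtime
    // is reset to force a content comparison on the next `status`.
    assert!(clear_ambiguous_mtime(&mut entry, 100));
    assert_eq!(entry.mtime, -1);
    // No longer ambiguous: mtime (-1) != now (100).
    assert!(!clear_ambiguous_mtime(&mut entry, 100));
}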
+
 pub fn pack_dirstate(
     state_map: &mut StateMap,
     copy_map: &CopyMap,
     parents: DirstateParents,
-    now: Duration,
+    now: Timestamp,
 ) -> Result<Vec<u8>, HgError> {
     // TODO move away from i32 before 2038.
-    let now: i32 = now.as_secs().try_into().expect("time overflow");
+    let now: i32 = now.0.try_into().expect("time overflow");
 
     let expected_size: usize = state_map
         .iter()
         .map(|(filename, _)| {
-            let mut length = MIN_ENTRY_SIZE + filename.len();
-            if let Some(copy) = copy_map.get(filename) {
-                length += copy.len() + 1;
-            }
-            length
+            packed_entry_size(filename, copy_map.get(filename).map(|p| &**p))
         })
         .sum();
     let expected_size = expected_size + PARENT_SIZE * 2;
@@ -100,39 +170,13 @@
     packed.extend(parents.p2.as_bytes());
 
     for (filename, entry) in state_map.iter_mut() {
-        let new_filename = filename.to_owned();
-        let mut new_mtime: i32 = entry.mtime;
-        if entry.state == EntryState::Normal && entry.mtime == now {
-            // The file was last modified "simultaneously" with the current
-            // write to dirstate (i.e. within the same second for file-
-            // systems with a granularity of 1 sec). This commonly happens
-            // for at least a couple of files on 'update'.
-            // The user could change the file without changing its size
-            // within the same second. Invalidate the file's mtime in
-            // dirstate, forcing future 'status' calls to compare the
-            // contents of the file if the size is the same. This prevents
-            // mistakenly treating such files as clean.
-            new_mtime = -1;
-            *entry = DirstateEntry {
-                mtime: new_mtime,
-                ..*entry
-            };
-        }
-        let mut new_filename = new_filename.into_vec();
-        if let Some(copy) = copy_map.get(filename) {
-            new_filename.push(b'\0');
-            new_filename.extend(copy.bytes());
-        }
-
-        // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
-        packed.write_u8(entry.state.into()).unwrap();
-        packed.write_i32::<BigEndian>(entry.mode).unwrap();
-        packed.write_i32::<BigEndian>(entry.size).unwrap();
-        packed.write_i32::<BigEndian>(new_mtime).unwrap();
-        packed
-            .write_i32::<BigEndian>(new_filename.len() as i32)
-            .unwrap();
-        packed.extend(new_filename)
+        clear_ambiguous_mtime(entry, now);
+        pack_entry(
+            filename,
+            entry,
+            copy_map.get(filename).map(|p| &**p),
+            &mut packed,
+        )
     }
 
     if packed.len() != expected_size {
@@ -160,7 +204,7 @@
             p1: b"12345678910111213141".into(),
             p2: b"00000000000000000000".into(),
         };
-        let now = Duration::new(15000000, 0);
+        let now = Timestamp(15000000);
         let expected = b"1234567891011121314100000000000000000000".to_vec();
 
         assert_eq!(
@@ -191,7 +235,7 @@
             p1: b"12345678910111213141".into(),
             p2: b"00000000000000000000".into(),
         };
-        let now = Duration::new(15000000, 0);
+        let now = Timestamp(15000000);
         let expected = [
             49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
             51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
@@ -231,7 +275,7 @@
             p1: b"12345678910111213141".into(),
             p2: b"00000000000000000000".into(),
         };
-        let now = Duration::new(15000000, 0);
+        let now = Timestamp(15000000);
         let expected = [
             49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
             51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
@@ -271,7 +315,7 @@
             p1: b"12345678910111213141".into(),
             p2: b"00000000000000000000".into(),
         };
-        let now = Duration::new(15000000, 0);
+        let now = Timestamp(15000000);
         let result =
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
@@ -349,7 +393,7 @@
             p1: b"12345678910111213141".into(),
             p2: b"00000000000000000000".into(),
         };
-        let now = Duration::new(15000000, 0);
+        let now = Timestamp(15000000);
         let result =
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
@@ -395,7 +439,7 @@
             p1: b"12345678910111213141".into(),
             p2: b"00000000000000000000".into(),
         };
-        let now = Duration::new(15000000, 0);
+        let now = Timestamp(15000000);
         let result =
             pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
                 .unwrap();
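`parse_dirstate` is now a thin wrapper over the callback-based `parse_dirstate_entries`, which lets callers consume entries during parsing instead of first materializing vectors. A hedged usage sketch against the signature shown above (the helper is illustrative, not part of hg-core):

use crate::dirstate::parsers::parse_dirstate_entries;
use crate::errors::HgError;

// Illustrative helper: count copy records without allocating
// per-entry vectors.
fn count_copies(contents: &[u8]) -> Result<usize, HgError> {
    let mut copies = 0;
    parse_dirstate_entries(contents, |_path, _entry, copy_source| {
        if copy_source.is_some() {
            copies += 1;
        }
    })?;
    Ok(copies)
}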
--- a/rust/hg-core/src/dirstate/status.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/dirstate/status.rs	Mon May 17 15:05:24 2021 +0200
@@ -95,9 +95,10 @@
 
 type IoResult<T> = std::io::Result<T>;
 
-/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait, 'static>`, so add
+/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add
 /// an explicit lifetime here to not fight `'static` bounds "out of nowhere".
-type IgnoreFnType<'a> = Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;
+pub type IgnoreFnType<'a> =
+    Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;
 
 /// We have a good mix of owned (from directory traversal) and borrowed (from
 /// the dirstate/explicit) paths; this comes up a lot.
@@ -254,16 +255,41 @@
     pub collect_traversed_dirs: bool,
 }
 
-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub struct DirstateStatus<'a> {
+    /// Tracked files whose contents have changed since the parent revision
     pub modified: Vec<HgPathCow<'a>>,
+
+    /// Newly-tracked files that were not present in the parent
     pub added: Vec<HgPathCow<'a>>,
+
+    /// Previously-tracked files that have been (re)moved with an hg command
     pub removed: Vec<HgPathCow<'a>>,
+
+    /// (Still) tracked files that are missing, (re)moved with a non-hg
+    /// command
     pub deleted: Vec<HgPathCow<'a>>,
+
+    /// Tracked files that are up to date with the parent.
+    /// Only populated if `StatusOptions::list_clean` is true.
     pub clean: Vec<HgPathCow<'a>>,
+
+    /// Files in the working directory that are ignored with `.hgignore`.
+    /// Only populated if `StatusOptions::list_ignored` is true.
     pub ignored: Vec<HgPathCow<'a>>,
+
+    /// Files in the working directory that are neither tracked nor ignored.
+    /// Only populated if `StatusOptions::list_unknown` is true.
     pub unknown: Vec<HgPathCow<'a>>,
+
+    /// Files that were explicitly matched but cannot be found/accessed
     pub bad: Vec<(HgPathCow<'a>, BadMatch)>,
+
+    /// Either clean or modified, but we can’t tell from filesystem metadata
+    /// alone. The file contents need to be read and compared with those in
+    /// the parent.
+    pub unsure: Vec<HgPathCow<'a>>,
+
     /// Only filled if `collect_traversed_dirs` is `true`
     pub traversed: Vec<HgPathBuf>,
 }
@@ -292,7 +318,7 @@
 
 /// Gives information about which files are changed in the working directory
 /// and how, compared to the revision we're based on
-pub struct Status<'a, M: Matcher + Sync> {
+pub struct Status<'a, M: ?Sized + Matcher + Sync> {
     dmap: &'a DirstateMap,
     pub(crate) matcher: &'a M,
     root_dir: PathBuf,
@@ -302,7 +328,7 @@
 
 impl<'a, M> Status<'a, M>
 where
-    M: Matcher + Sync,
+    M: ?Sized + Matcher + Sync,
 {
     pub fn new(
         dmap: &'a DirstateMap,
@@ -847,8 +873,8 @@
 pub fn build_response<'a>(
     results: impl IntoIterator<Item = DispatchedPath<'a>>,
     traversed: Vec<HgPathBuf>,
-) -> (Vec<HgPathCow<'a>>, DirstateStatus<'a>) {
-    let mut lookup = vec![];
+) -> DirstateStatus<'a> {
+    let mut unsure = vec![];
     let mut modified = vec![];
     let mut added = vec![];
     let mut removed = vec![];
@@ -861,7 +887,7 @@
     for (filename, dispatch) in results.into_iter() {
         match dispatch {
             Dispatch::Unknown => unknown.push(filename),
-            Dispatch::Unsure => lookup.push(filename),
+            Dispatch::Unsure => unsure.push(filename),
             Dispatch::Modified => modified.push(filename),
             Dispatch::Added => added.push(filename),
             Dispatch::Removed => removed.push(filename),
@@ -874,20 +900,18 @@
         }
     }
 
-    (
-        lookup,
-        DirstateStatus {
-            modified,
-            added,
-            removed,
-            deleted,
-            clean,
-            ignored,
-            unknown,
-            bad,
-            traversed,
-        },
-    )
+    DirstateStatus {
+        modified,
+        added,
+        removed,
+        deleted,
+        clean,
+        ignored,
+        unknown,
+        bad,
+        unsure,
+        traversed,
+    }
 }
 
 /// Get the status of files in the working directory.
@@ -898,14 +922,11 @@
 #[timed]
 pub fn status<'a>(
     dmap: &'a DirstateMap,
-    matcher: &'a (impl Matcher + Sync),
+    matcher: &'a (dyn Matcher + Sync),
     root_dir: PathBuf,
     ignore_files: Vec<PathBuf>,
     options: StatusOptions,
-) -> StatusResult<(
-    (Vec<HgPathCow<'a>>, DirstateStatus<'a>),
-    Vec<PatternFileWarning>,
-)> {
+) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
     let (status, warnings) =
         Status::new(dmap, matcher, root_dir, ignore_files, options)?;
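Relaxing the matcher bound to `M: ?Sized + Matcher + Sync` is what lets `status()` accept `&(dyn Matcher + Sync)` rather than monomorphizing per matcher type: trait objects are unsized, so the implicit `Sized` bound must be opted out of. A self-contained sketch of the mechanics with a hypothetical matcher trait:

// Hypothetical trait, standing in for hg-core's `Matcher`.
trait Matcher {
    fn matches(&self, path: &str) -> bool;
}

struct AlwaysMatcher;

impl Matcher for AlwaysMatcher {
    fn matches(&self, _path: &str) -> bool {
        true
    }
}

// Without `?Sized`, `M = dyn Matcher + Sync` would be rejected because
// trait objects have no statically known size.
struct Status<'a, M: ?Sized + Matcher + Sync> {
    matcher: &'a M,
}

fn main() {
    // Monomorphized over a concrete matcher type...
    let concrete = Status { matcher: &AlwaysMatcher };
    // ...or shared through a trait object, one compiled instance.
    let dynamic: Status<'_, dyn Matcher + Sync> =
        Status { matcher: &AlwaysMatcher };
    assert!(concrete.matcher.matches("a"));
    assert!(dynamic.matcher.matches("b"));
}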
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree.rs	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,4 @@
+pub mod dirstate_map;
+pub mod dispatch;
+pub mod path_with_basename;
+mod status;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,627 @@
+use bytes_cast::BytesCast;
+use micro_timer::timed;
+use std::borrow::Cow;
+use std::convert::TryInto;
+use std::path::PathBuf;
+
+use super::path_with_basename::WithBasename;
+use crate::dirstate::parsers::clear_ambiguous_mtime;
+use crate::dirstate::parsers::pack_entry;
+use crate::dirstate::parsers::packed_entry_size;
+use crate::dirstate::parsers::parse_dirstate_entries;
+use crate::dirstate::parsers::Timestamp;
+use crate::matchers::Matcher;
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::CopyMapIter;
+use crate::DirstateEntry;
+use crate::DirstateError;
+use crate::DirstateMapError;
+use crate::DirstateParents;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::FastHashMap;
+use crate::PatternFileWarning;
+use crate::StateMapIter;
+use crate::StatusError;
+use crate::StatusOptions;
+
+pub struct DirstateMap<'on_disk> {
+    /// Contents of the `.hg/dirstate` file
+    on_disk: &'on_disk [u8],
+
+    pub(super) root: ChildNodes<'on_disk>,
+
+    /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
+    nodes_with_entry_count: usize,
+
+    /// Number of nodes anywhere in the tree that have
+    /// `.copy_source.is_some()`.
+    nodes_with_copy_source_count: usize,
+}
+
+/// Using a plain `HgPathBuf` of the full path from the repository root as a
+/// map key would also work: all paths in a given map have the same parent
+/// path, so comparing full paths gives the same result as comparing base
+/// names. However `BTreeMap` would waste time always re-comparing the same
+/// string prefix.
+pub(super) type ChildNodes<'on_disk> =
+    FastHashMap<WithBasename<Cow<'on_disk, HgPath>>, Node<'on_disk>>;
+
+/// Represents a file or a directory
+#[derive(Default)]
+pub(super) struct Node<'on_disk> {
+    /// `None` for directories
+    pub(super) entry: Option<DirstateEntry>,
+
+    pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
+
+    pub(super) children: ChildNodes<'on_disk>,
+
+    /// How many (non-inclusive) descendants of this node are tracked files
+    tracked_descendants_count: usize,
+}
+
+impl Node<'_> {
+    pub(super) fn state(&self) -> Option<EntryState> {
+        self.entry.as_ref().map(|entry| entry.state)
+    }
+}
+
+/// `(full_path, entry, copy_source)`
+type NodeDataMut<'tree, 'on_disk> = (
+    &'tree HgPath,
+    &'tree mut Option<DirstateEntry>,
+    &'tree mut Option<Cow<'on_disk, HgPath>>,
+);
+
+impl<'on_disk> DirstateMap<'on_disk> {
+    pub fn new(
+        on_disk: &'on_disk [u8],
+    ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
+        let mut map = Self {
+            on_disk,
+            root: ChildNodes::default(),
+            nodes_with_entry_count: 0,
+            nodes_with_copy_source_count: 0,
+        };
+        let parents = map.read()?;
+        Ok((map, parents))
+    }
+
+    /// Should only be called in `new`
+    #[timed]
+    fn read(&mut self) -> Result<Option<DirstateParents>, DirstateError> {
+        if self.on_disk.is_empty() {
+            return Ok(None);
+        }
+
+        let parents = parse_dirstate_entries(
+            self.on_disk,
+            |path, entry, copy_source| {
+                let tracked = entry.state.is_tracked();
+                let node = Self::get_or_insert_node(
+                    &mut self.root,
+                    path,
+                    WithBasename::to_cow_borrowed,
+                    |ancestor| {
+                        if tracked {
+                            ancestor.tracked_descendants_count += 1
+                        }
+                    },
+                );
+                assert!(
+                    node.entry.is_none(),
+                    "duplicate dirstate entry in read"
+                );
+                assert!(
+                    node.copy_source.is_none(),
+                    "duplicate dirstate entry in read"
+                );
+                node.entry = Some(*entry);
+                node.copy_source = copy_source.map(Cow::Borrowed);
+                self.nodes_with_entry_count += 1;
+                if copy_source.is_some() {
+                    self.nodes_with_copy_source_count += 1
+                }
+            },
+        )?;
+
+        Ok(Some(parents.clone()))
+    }
+
+    fn get_node(&self, path: &HgPath) -> Option<&Node> {
+        let mut children = &self.root;
+        let mut components = path.components();
+        let mut component =
+            components.next().expect("expected at least one component");
+        loop {
+            let child = children.get(component)?;
+            if let Some(next_component) = components.next() {
+                component = next_component;
+                children = &child.children;
+            } else {
+                return Some(child);
+            }
+        }
+    }
+
+    /// Returns a mutable reference to the node at `path` if it exists
+    ///
+    /// This takes `root` instead of `&mut self` so that callers can mutate
+    /// other fields while the returned borrow is still valid
+    fn get_node_mut<'tree>(
+        root: &'tree mut ChildNodes<'on_disk>,
+        path: &HgPath,
+    ) -> Option<&'tree mut Node<'on_disk>> {
+        let mut children = root;
+        let mut components = path.components();
+        let mut component =
+            components.next().expect("expected at least one component");
+        loop {
+            let child = children.get_mut(component)?;
+            if let Some(next_component) = components.next() {
+                component = next_component;
+                children = &mut child.children;
+            } else {
+                return Some(child);
+            }
+        }
+    }
+
+    fn get_or_insert_node<'tree, 'path>(
+        root: &'tree mut ChildNodes<'on_disk>,
+        path: &'path HgPath,
+        to_cow: impl Fn(
+            WithBasename<&'path HgPath>,
+        ) -> WithBasename<Cow<'on_disk, HgPath>>,
+        mut each_ancestor: impl FnMut(&mut Node),
+    ) -> &'tree mut Node<'on_disk> {
+        let mut child_nodes = root;
+        let mut inclusive_ancestor_paths =
+            WithBasename::inclusive_ancestors_of(path);
+        let mut ancestor_path = inclusive_ancestor_paths
+            .next()
+            .expect("expected at least one inclusive ancestor");
+        loop {
+            // TODO: can we avoid allocating an owned key in cases where the
+            // map already contains that key, without introducing double
+            // lookup?
+            let child_node =
+                child_nodes.entry(to_cow(ancestor_path)).or_default();
+            if let Some(next) = inclusive_ancestor_paths.next() {
+                each_ancestor(child_node);
+                ancestor_path = next;
+                child_nodes = &mut child_node.children;
+            } else {
+                return child_node;
+            }
+        }
+    }
+
+    fn add_or_remove_file(
+        &mut self,
+        path: &HgPath,
+        old_state: EntryState,
+        new_entry: DirstateEntry,
+    ) {
+        let tracked_count_increment =
+            match (old_state.is_tracked(), new_entry.state.is_tracked()) {
+                (false, true) => 1,
+                (true, false) => -1,
+                _ => 0,
+            };
+
+        let node = Self::get_or_insert_node(
+            &mut self.root,
+            path,
+            WithBasename::to_cow_owned,
+            |ancestor| {
+                // We can’t use `+= increment` because the counter is unsigned,
+                // and we want debug builds to detect accidental underflow
+                // through zero
+                match tracked_count_increment {
+                    1 => ancestor.tracked_descendants_count += 1,
+                    -1 => ancestor.tracked_descendants_count -= 1,
+                    _ => {}
+                }
+            },
+        );
+        if node.entry.is_none() {
+            self.nodes_with_entry_count += 1
+        }
+        node.entry = Some(new_entry)
+    }
+
+    fn iter_nodes<'a>(
+        &'a self,
+    ) -> impl Iterator<Item = (&'a HgPath, &'a Node)> + 'a {
+        // Depth first tree traversal.
+        //
+        // If we could afford internal iteration and recursion,
+        // this would look like:
+        //
+        // ```
+        // fn traverse_children(
+        //     children: &ChildNodes,
+        //     each: &mut impl FnMut(&Node),
+        // ) {
+        //     for child in children.values() {
+        //         traverse_children(&child.children, each);
+        //         each(child);
+        //     }
+        // }
+        // ```
+        //
+        // However we want an external iterator and therefore can’t use the
+        // call stack. Use an explicit stack instead:
+        let mut stack = Vec::new();
+        let mut iter = self.root.iter();
+        std::iter::from_fn(move || {
+            while let Some((key, child_node)) = iter.next() {
+                // Pseudo-recursion
+                let new_iter = child_node.children.iter();
+                let old_iter = std::mem::replace(&mut iter, new_iter);
+                let key = &**key.full_path();
+                stack.push((key, child_node, old_iter));
+            }
+            // Found the end of a `children.iter()` iterator.
+            if let Some((key, child_node, next_iter)) = stack.pop() {
+                // "Return" from pseudo-recursion by restoring state from the
+                // explicit stack
+                iter = next_iter;
+
+                Some((key, child_node))
+            } else {
+                // Reached the bottom of the stack, we’re done
+                None
+            }
+        })
+    }
+
+    /// Mutable iterator for the `(entry, copy source)` of each node.
+    ///
+    /// It would not be safe to yield mutable references to nodes themselves
+    /// with `-> impl Iterator<Item = &mut Node>` since child nodes are
+    /// reachable from their ancestor nodes, potentially creating multiple
+    /// `&mut` references to a given node.
+    fn iter_node_data_mut<'tree>(
+        &'tree mut self,
+    ) -> impl Iterator<Item = NodeDataMut<'tree, 'on_disk>> + 'tree {
+        // Explicit stack for pseudo-recursion; see `iter_nodes` above.
+        let mut stack = Vec::new();
+        let mut iter = self.root.iter_mut();
+        std::iter::from_fn(move || {
+            while let Some((key, child_node)) = iter.next() {
+                // Pseudo-recursion
+                let data = (
+                    &**key.full_path(),
+                    &mut child_node.entry,
+                    &mut child_node.copy_source,
+                );
+                let new_iter = child_node.children.iter_mut();
+                let old_iter = std::mem::replace(&mut iter, new_iter);
+                stack.push((data, old_iter));
+            }
+            // Found the end of a `children.iter_mut()` iterator.
+            if let Some((data, next_iter)) = stack.pop() {
+                // "Return" from pseudo-recursion by restoring state from the
+                // explicit stack
+                iter = next_iter;
+
+                Some(data)
+            } else {
+                // Reached the bottom of the stack, we’re done
+                None
+            }
+        })
+    }
+}
+
+impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
+    fn clear(&mut self) {
+        self.root.clear();
+        self.nodes_with_entry_count = 0;
+        self.nodes_with_copy_source_count = 0;
+    }
+
+    fn add_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        entry: DirstateEntry,
+    ) -> Result<(), DirstateMapError> {
+        self.add_or_remove_file(filename, old_state, entry);
+        Ok(())
+    }
+
+    fn remove_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        size: i32,
+    ) -> Result<(), DirstateMapError> {
+        let entry = DirstateEntry {
+            state: EntryState::Removed,
+            mode: 0,
+            size,
+            mtime: 0,
+        };
+        self.add_or_remove_file(filename, old_state, entry);
+        Ok(())
+    }
+
+    fn drop_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+    ) -> Result<bool, DirstateMapError> {
+        struct Dropped {
+            was_tracked: bool,
+            had_entry: bool,
+            had_copy_source: bool,
+        }
+        fn recur(nodes: &mut ChildNodes, path: &HgPath) -> Option<Dropped> {
+            let (first_path_component, rest_of_path) =
+                path.split_first_component();
+            let node = nodes.get_mut(first_path_component)?;
+            let dropped;
+            if let Some(rest) = rest_of_path {
+                dropped = recur(&mut node.children, rest)?;
+                if dropped.was_tracked {
+                    node.tracked_descendants_count -= 1;
+                }
+            } else {
+                dropped = Dropped {
+                    was_tracked: node
+                        .entry
+                        .as_ref()
+                        .map_or(false, |entry| entry.state.is_tracked()),
+                    had_entry: node.entry.take().is_some(),
+                    had_copy_source: node.copy_source.take().is_some(),
+                };
+            }
+            // After recursion, for both leaf (rest_of_path is None) nodes and
+            // parent nodes, remove a node if it just became empty.
+            if node.entry.is_none()
+                && node.copy_source.is_none()
+                && node.children.is_empty()
+            {
+                nodes.remove(first_path_component);
+            }
+            Some(dropped)
+        }
+
+        if let Some(dropped) = recur(&mut self.root, filename) {
+            if dropped.had_entry {
+                self.nodes_with_entry_count -= 1
+            }
+            if dropped.had_copy_source {
+                self.nodes_with_copy_source_count -= 1
+            }
+            Ok(dropped.had_entry)
+        } else {
+            debug_assert!(!old_state.is_tracked());
+            Ok(false)
+        }
+    }
+
+    fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32) {
+        for filename in filenames {
+            if let Some(node) = Self::get_node_mut(&mut self.root, &filename) {
+                if let Some(entry) = node.entry.as_mut() {
+                    clear_ambiguous_mtime(entry, now);
+                }
+            }
+        }
+    }
+
+    fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool {
+        self.get_node(key)
+            .and_then(|node| node.entry.as_ref())
+            .map_or(false, DirstateEntry::is_non_normal)
+    }
+
+    fn non_normal_entries_remove(&mut self, _key: &HgPath) {
+        // Do nothing, this `DirstateMap` does not have a separate "non normal
+        // entries" set that need to be kept up to date
+    }
+
+    fn non_normal_or_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + '_> {
+        Box::new(self.iter_nodes().filter_map(|(path, node)| {
+            node.entry
+                .as_ref()
+                .filter(|entry| {
+                    entry.is_non_normal() || entry.is_from_other_parent()
+                })
+                .map(|_| path)
+        }))
+    }
+
+    fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
+        // Do nothing, this `DirstateMap` does not have a separate "non normal
+        // entries" and "from other parent" sets that need to be recomputed
+    }
+
+    fn iter_non_normal_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        self.iter_non_normal_paths_panic()
+    }
+
+    fn iter_non_normal_paths_panic(
+        &self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        Box::new(self.iter_nodes().filter_map(|(path, node)| {
+            node.entry
+                .as_ref()
+                .filter(|entry| entry.is_non_normal())
+                .map(|_| path)
+        }))
+    }
+
+    fn iter_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        Box::new(self.iter_nodes().filter_map(|(path, node)| {
+            node.entry
+                .as_ref()
+                .filter(|entry| entry.is_from_other_parent())
+                .map(|_| path)
+        }))
+    }
+
+    fn has_tracked_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        if let Some(node) = self.get_node(directory) {
+            // A node without a `DirstateEntry` was created to hold child
+            // nodes, and is therefore a directory.
+            Ok(node.entry.is_none() && node.tracked_descendants_count > 0)
+        } else {
+            Ok(false)
+        }
+    }
+
+    fn has_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        if let Some(node) = self.get_node(directory) {
+            // A node without a `DirstateEntry` was created to hold child
+            // nodes, and is therefore a directory.
+            Ok(node.entry.is_none())
+        } else {
+            Ok(false)
+        }
+    }
+
+    fn pack(
+        &mut self,
+        parents: DirstateParents,
+        now: Timestamp,
+    ) -> Result<Vec<u8>, DirstateError> {
+        // Optimization (to be measured?): pre-compute size to avoid `Vec`
+        // reallocations
+        let mut size = parents.as_bytes().len();
+        for (path, node) in self.iter_nodes() {
+            if node.entry.is_some() {
+                size += packed_entry_size(
+                    path,
+                    node.copy_source.as_ref().map(|p| &**p),
+                )
+            }
+        }
+
+        let mut packed = Vec::with_capacity(size);
+        packed.extend(parents.as_bytes());
+
+        let now: i32 = now.0.try_into().expect("time overflow");
+        for (path, opt_entry, copy_source) in self.iter_node_data_mut() {
+            if let Some(entry) = opt_entry {
+                clear_ambiguous_mtime(entry, now);
+                pack_entry(
+                    path,
+                    entry,
+                    copy_source.as_ref().map(|p| &**p),
+                    &mut packed,
+                );
+            }
+        }
+        Ok(packed)
+    }
+
+    fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+        // Do nothing, this `DirstateMap` does not have a separate
+        // `all_dirs` that needs to be recomputed
+        Ok(())
+    }
+
+    fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+        // Do nothing, this `DirstateMap` does not have a separate `dirs`
+        // that needs to be recomputed
+        Ok(())
+    }
+
+    fn status<'a>(
+        &'a mut self,
+        matcher: &'a (dyn Matcher + Sync),
+        root_dir: PathBuf,
+        ignore_files: Vec<PathBuf>,
+        options: StatusOptions,
+    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+    {
+        super::status::status(self, matcher, root_dir, ignore_files, options)
+    }
+
+    fn copy_map_len(&self) -> usize {
+        self.nodes_with_copy_source_count
+    }
+
+    fn copy_map_iter(&self) -> CopyMapIter<'_> {
+        Box::new(self.iter_nodes().filter_map(|(path, node)| {
+            node.copy_source
+                .as_ref()
+                .map(|copy_source| (path, &**copy_source))
+        }))
+    }
+
+    fn copy_map_contains_key(&self, key: &HgPath) -> bool {
+        if let Some(node) = self.get_node(key) {
+            node.copy_source.is_some()
+        } else {
+            false
+        }
+    }
+
+    fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath> {
+        self.get_node(key)?.copy_source.as_ref().map(|p| &**p)
+    }
+
+    fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf> {
+        let count = &mut self.nodes_with_copy_source_count;
+        Self::get_node_mut(&mut self.root, key).and_then(|node| {
+            if node.copy_source.is_some() {
+                *count -= 1
+            }
+            node.copy_source.take().map(Cow::into_owned)
+        })
+    }
+
+    fn copy_map_insert(
+        &mut self,
+        key: HgPathBuf,
+        value: HgPathBuf,
+    ) -> Option<HgPathBuf> {
+        let node = Self::get_or_insert_node(
+            &mut self.root,
+            &key,
+            WithBasename::to_cow_owned,
+            |_ancestor| {},
+        );
+        if node.copy_source.is_none() {
+            self.nodes_with_copy_source_count += 1
+        }
+        node.copy_source.replace(value.into()).map(Cow::into_owned)
+    }
+
+    fn len(&self) -> usize {
+        self.nodes_with_entry_count
+    }
+
+    fn contains_key(&self, key: &HgPath) -> bool {
+        self.get(key).is_some()
+    }
+
+    fn get(&self, key: &HgPath) -> Option<&DirstateEntry> {
+        self.get_node(key)?.entry.as_ref()
+    }
+
+    fn iter(&self) -> StateMapIter<'_> {
+        Box::new(self.iter_nodes().filter_map(|(path, node)| {
+            node.entry.as_ref().map(|entry| (path, entry))
+        }))
+    }
+}
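`iter_nodes` and `iter_node_data_mut` above both rely on the same trick: depth-first traversal as an external iterator, replacing the call stack with an explicit `Vec` of suspended parent iterators. A minimal, self-contained sketch of the pattern (types hypothetical; the real code also threads entry data through):

use std::collections::HashMap;

// Hypothetical stand-in for the tree node type.
#[derive(Default)]
struct Node {
    children: HashMap<String, Node>,
}

// Yields every key in the tree, children before their parent,
// without recursion.
fn iter_nodes(root: &HashMap<String, Node>) -> impl Iterator<Item = &str> {
    let mut stack = Vec::new();
    let mut iter = root.iter();
    std::iter::from_fn(move || {
        // Descend as far as possible, suspending each parent iterator
        // on the explicit stack ("pseudo-recursion").
        while let Some((key, child)) = iter.next() {
            let parent_iter =
                std::mem::replace(&mut iter, child.children.iter());
            stack.push((key.as_str(), parent_iter));
        }
        // The current iterator is exhausted: "return" from the
        // pseudo-recursion by restoring the nearest suspended parent.
        stack.pop().map(|(key, parent_iter)| {
            iter = parent_iter;
            key
        })
    })
}

fn main() {
    let mut root: HashMap<String, Node> = HashMap::new();
    root.entry("a".to_owned())
        .or_default()
        .children
        .insert("b".to_owned(), Node::default());
    let visited: Vec<&str> = iter_nodes(&root).collect();
    // Children are yielded before their parents (post-order).
    assert_eq!(visited, ["b", "a"]);
}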
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/dispatch.rs	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,284 @@
+use std::path::PathBuf;
+
+use crate::dirstate::parsers::Timestamp;
+use crate::matchers::Matcher;
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::CopyMapIter;
+use crate::DirstateEntry;
+use crate::DirstateError;
+use crate::DirstateMap;
+use crate::DirstateMapError;
+use crate::DirstateParents;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::PatternFileWarning;
+use crate::StateMapIter;
+use crate::StatusError;
+use crate::StatusOptions;
+
+pub trait DirstateMapMethods {
+    fn clear(&mut self);
+
+    fn add_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        entry: DirstateEntry,
+    ) -> Result<(), DirstateMapError>;
+
+    fn remove_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        size: i32,
+    ) -> Result<(), DirstateMapError>;
+
+    fn drop_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+    ) -> Result<bool, DirstateMapError>;
+
+    fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32);
+
+    fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool;
+
+    fn non_normal_entries_remove(&mut self, key: &HgPath);
+
+    fn non_normal_or_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + '_>;
+
+    fn set_non_normal_other_parent_entries(&mut self, force: bool);
+
+    fn iter_non_normal_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_>;
+
+    fn iter_non_normal_paths_panic(
+        &self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_>;
+
+    fn iter_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_>;
+
+    fn has_tracked_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError>;
+
+    fn has_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError>;
+
+    fn pack(
+        &mut self,
+        parents: DirstateParents,
+        now: Timestamp,
+    ) -> Result<Vec<u8>, DirstateError>;
+
+    fn set_all_dirs(&mut self) -> Result<(), DirstateMapError>;
+
+    fn set_dirs(&mut self) -> Result<(), DirstateMapError>;
+
+    fn status<'a>(
+        &'a mut self,
+        matcher: &'a (dyn Matcher + Sync),
+        root_dir: PathBuf,
+        ignore_files: Vec<PathBuf>,
+        options: StatusOptions,
+    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
+
+    fn copy_map_len(&self) -> usize;
+
+    fn copy_map_iter(&self) -> CopyMapIter<'_>;
+
+    fn copy_map_contains_key(&self, key: &HgPath) -> bool;
+
+    fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath>;
+
+    fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf>;
+
+    fn copy_map_insert(
+        &mut self,
+        key: HgPathBuf,
+        value: HgPathBuf,
+    ) -> Option<HgPathBuf>;
+
+    fn len(&self) -> usize;
+
+    fn contains_key(&self, key: &HgPath) -> bool;
+
+    fn get(&self, key: &HgPath) -> Option<&DirstateEntry>;
+
+    fn iter(&self) -> StateMapIter<'_>;
+}
+
+impl DirstateMapMethods for DirstateMap {
+    fn clear(&mut self) {
+        self.clear()
+    }
+
+    fn add_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        entry: DirstateEntry,
+    ) -> Result<(), DirstateMapError> {
+        self.add_file(filename, old_state, entry)
+    }
+
+    fn remove_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        size: i32,
+    ) -> Result<(), DirstateMapError> {
+        self.remove_file(filename, old_state, size)
+    }
+
+    fn drop_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+    ) -> Result<bool, DirstateMapError> {
+        self.drop_file(filename, old_state)
+    }
+
+    fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32) {
+        self.clear_ambiguous_times(filenames, now)
+    }
+
+    fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool {
+        let (non_normal, _other_parent) =
+            self.get_non_normal_other_parent_entries();
+        non_normal.contains(key)
+    }
+
+    fn non_normal_entries_remove(&mut self, key: &HgPath) {
+        self.non_normal_entries_remove(key)
+    }
+
+    fn non_normal_or_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + '_> {
+        let (non_normal, other_parent) =
+            self.get_non_normal_other_parent_entries();
+        Box::new(non_normal.union(other_parent).map(|p| &**p))
+    }
+
+    fn set_non_normal_other_parent_entries(&mut self, force: bool) {
+        self.set_non_normal_other_parent_entries(force)
+    }
+
+    fn iter_non_normal_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        let (non_normal, _other_parent) =
+            self.get_non_normal_other_parent_entries();
+        Box::new(non_normal.iter().map(|p| &**p))
+    }
+
+    fn iter_non_normal_paths_panic(
+        &self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        let (non_normal, _other_parent) =
+            self.get_non_normal_other_parent_entries_panic();
+        Box::new(non_normal.iter().map(|p| &**p))
+    }
+
+    fn iter_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        let (_non_normal, other_parent) =
+            self.get_non_normal_other_parent_entries();
+        Box::new(other_parent.iter().map(|p| &**p))
+    }
+
+    fn has_tracked_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        self.has_tracked_dir(directory)
+    }
+
+    fn has_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        self.has_dir(directory)
+    }
+
+    fn pack(
+        &mut self,
+        parents: DirstateParents,
+        now: Timestamp,
+    ) -> Result<Vec<u8>, DirstateError> {
+        self.pack(parents, now)
+    }
+
+    fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+        self.set_all_dirs()
+    }
+
+    fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+        self.set_dirs()
+    }
+
+    fn status<'a>(
+        &'a mut self,
+        matcher: &'a (dyn Matcher + Sync),
+        root_dir: PathBuf,
+        ignore_files: Vec<PathBuf>,
+        options: StatusOptions,
+    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+    {
+        crate::status(self, matcher, root_dir, ignore_files, options)
+    }
+
+    fn copy_map_len(&self) -> usize {
+        self.copy_map.len()
+    }
+
+    fn copy_map_iter(&self) -> CopyMapIter<'_> {
+        Box::new(self.copy_map.iter().map(|(key, value)| (&**key, &**value)))
+    }
+
+    fn copy_map_contains_key(&self, key: &HgPath) -> bool {
+        self.copy_map.contains_key(key)
+    }
+
+    fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath> {
+        self.copy_map.get(key).map(|p| &**p)
+    }
+
+    fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf> {
+        self.copy_map.remove(key)
+    }
+
+    fn copy_map_insert(
+        &mut self,
+        key: HgPathBuf,
+        value: HgPathBuf,
+    ) -> Option<HgPathBuf> {
+        self.copy_map.insert(key, value)
+    }
+
+    fn len(&self) -> usize {
+        (&**self).len()
+    }
+
+    fn contains_key(&self, key: &HgPath) -> bool {
+        (&**self).contains_key(key)
+    }
+
+    fn get(&self, key: &HgPath) -> Option<&DirstateEntry> {
+        (&**self).get(key)
+    }
+
+    fn iter(&self) -> StateMapIter<'_> {
+        Box::new((&**self).iter().map(|(key, value)| (&**key, value)))
+    }
+}
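Both the flat `DirstateMap` (blanket impl above) and the tree-based one implement `DirstateMapMethods`, so calling code can stay implementation-agnostic behind a trait object. A hedged sketch of the kind of code this dispatch layer enables (the helper itself is hypothetical):

use crate::dirstate_tree::dispatch::DirstateMapMethods;
use crate::utils::hg_path::HgPath;

// Illustrative helper: works unchanged with either dirstate map
// implementation.
fn is_tracked_copy(map: &dyn DirstateMapMethods, path: &HgPath) -> bool {
    map.contains_key(path) && map.copy_map_contains_key(path)
}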
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/path_with_basename.rs	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,172 @@
+use crate::utils::hg_path::HgPath;
+use std::borrow::{Borrow, Cow};
+
+/// Wraps `HgPath` or `HgPathBuf` to make it behave "as" its last path
+/// component, a.k.a. its base name (as in Python’s `os.path.basename`), but
+/// also allow recovering the full path.
+///
+/// "Behaving as" means that equality and comparison consider only the base
+/// name, and `std::borrow::Borrow` is implemented to return only the base
+/// name. This allows using the base name as a map key while still being able
+/// to recover the full path, in a single memory allocation.
+#[derive(Debug)]
+pub struct WithBasename<T> {
+    full_path: T,
+
+    /// The position after the last slash separator in `full_path`, or `0`
+    /// if there is no slash.
+    base_name_start: usize,
+}
+
+impl<T> WithBasename<T> {
+    pub fn full_path(&self) -> &T {
+        &self.full_path
+    }
+}
+
+impl<T: AsRef<HgPath>> WithBasename<T> {
+    pub fn new(full_path: T) -> Self {
+        let base_name_start = if let Some(last_slash_position) = full_path
+            .as_ref()
+            .as_bytes()
+            .iter()
+            .rposition(|&byte| byte == b'/')
+        {
+            last_slash_position + 1
+        } else {
+            0
+        };
+        Self {
+            base_name_start,
+            full_path,
+        }
+    }
+
+    pub fn base_name(&self) -> &HgPath {
+        HgPath::new(
+            &self.full_path.as_ref().as_bytes()[self.base_name_start..],
+        )
+    }
+}
+
+impl<T: AsRef<HgPath>> Borrow<HgPath> for WithBasename<T> {
+    fn borrow(&self) -> &HgPath {
+        self.base_name()
+    }
+}
+
+impl<T: AsRef<HgPath>> std::hash::Hash for WithBasename<T> {
+    fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) {
+        self.base_name().hash(hasher)
+    }
+}
+
+impl<T: AsRef<HgPath> + PartialEq> PartialEq for WithBasename<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.base_name() == other.base_name()
+    }
+}
+
+impl<T: AsRef<HgPath> + Eq> Eq for WithBasename<T> {}
+
+impl<T: AsRef<HgPath> + PartialOrd> PartialOrd for WithBasename<T> {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        self.base_name().partial_cmp(other.base_name())
+    }
+}
+
+impl<T: AsRef<HgPath> + Ord> Ord for WithBasename<T> {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.base_name().cmp(other.base_name())
+    }
+}
+
+impl<'a> WithBasename<&'a HgPath> {
+    pub fn to_cow_borrowed(self) -> WithBasename<Cow<'a, HgPath>> {
+        WithBasename {
+            full_path: Cow::Borrowed(self.full_path),
+            base_name_start: self.base_name_start,
+        }
+    }
+
+    pub fn to_cow_owned<'b>(self) -> WithBasename<Cow<'b, HgPath>> {
+        WithBasename {
+            full_path: Cow::Owned(self.full_path.to_owned()),
+            base_name_start: self.base_name_start,
+        }
+    }
+}
+
+impl<'a> WithBasename<&'a HgPath> {
+    /// Returns an iterator of `WithBasename<&HgPath>` for the ancestor
+    /// directory paths of the given `path`, as well as `path` itself.
+    ///
+    /// For example, the full paths of inclusive ancestors of "a/b/c" are "a",
+    /// "a/b", and "a/b/c" in that order.
+    pub fn inclusive_ancestors_of(
+        path: &'a HgPath,
+    ) -> impl Iterator<Item = WithBasename<&'a HgPath>> {
+        let mut slash_positions =
+            path.as_bytes().iter().enumerate().filter_map(|(i, &byte)| {
+                if byte == b'/' {
+                    Some(i)
+                } else {
+                    None
+                }
+            });
+        let mut opt_next_component_start = Some(0);
+        std::iter::from_fn(move || {
+            opt_next_component_start.take().map(|next_component_start| {
+                if let Some(slash_pos) = slash_positions.next() {
+                    opt_next_component_start = Some(slash_pos + 1);
+                    Self {
+                        full_path: HgPath::new(&path.as_bytes()[..slash_pos]),
+                        base_name_start: next_component_start,
+                    }
+                } else {
+                    // Not setting `opt_next_component_start` here: there will
+                    // be no iteration after this one because `.take()` set it
+                    // to `None`.
+                    Self {
+                        full_path: path,
+                        base_name_start: next_component_start,
+                    }
+                }
+            })
+        })
+    }
+}
+
+#[test]
+fn test() {
+    let a = WithBasename::new(HgPath::new("a").to_owned());
+    assert_eq!(&**a.full_path(), HgPath::new(b"a"));
+    assert_eq!(a.base_name(), HgPath::new(b"a"));
+
+    let cba = WithBasename::new(HgPath::new("c/b/a").to_owned());
+    assert_eq!(&**cba.full_path(), HgPath::new(b"c/b/a"));
+    assert_eq!(cba.base_name(), HgPath::new(b"a"));
+
+    assert_eq!(a, cba);
+    let borrowed: &HgPath = cba.borrow();
+    assert_eq!(borrowed, HgPath::new("a"));
+}
+
+#[test]
+fn test_inclusive_ancestors() {
+    let mut iter = WithBasename::inclusive_ancestors_of(HgPath::new("a/bb/c"));
+
+    let next = iter.next().unwrap();
+    assert_eq!(*next.full_path(), HgPath::new("a"));
+    assert_eq!(next.base_name(), HgPath::new("a"));
+
+    let next = iter.next().unwrap();
+    assert_eq!(*next.full_path(), HgPath::new("a/bb"));
+    assert_eq!(next.base_name(), HgPath::new("bb"));
+
+    let next = iter.next().unwrap();
+    assert_eq!(*next.full_path(), HgPath::new("a/bb/c"));
+    assert_eq!(next.base_name(), HgPath::new("c"));
+
+    assert!(iter.next().is_none());
+}
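The point of the `Borrow`, `Hash`, and ordering impls above is that a map keyed by `WithBasename` can be queried with a bare base name while each key still carries its full path; this is how `ChildNodes` is used by the tree dirstate map. A small illustrative sketch (not from the patch):

use crate::utils::hg_path::{HgPath, HgPathBuf};
use std::collections::HashMap;

#[test]
fn test_lookup_by_base_name() {
    let mut children: HashMap<WithBasename<HgPathBuf>, u32> = HashMap::new();
    children.insert(WithBasename::new(HgPath::new("dir/file").to_owned()), 1);

    // `get` accepts a bare `&HgPath` because `WithBasename<_>` implements
    // `Borrow<HgPath>`, and `Hash`/`Eq` agree on comparing only the base
    // name.
    assert_eq!(children.get(HgPath::new("file")), Some(&1));

    // The full path is still recoverable from the stored key.
    let (key, _) = children.iter().next().unwrap();
    assert_eq!(&**key.full_path(), HgPath::new("dir/file"));
}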
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/status.rs	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,428 @@
+use crate::dirstate::status::IgnoreFnType;
+use crate::dirstate_tree::dirstate_map::ChildNodes;
+use crate::dirstate_tree::dirstate_map::DirstateMap;
+use crate::dirstate_tree::dirstate_map::Node;
+use crate::matchers::get_ignore_function;
+use crate::matchers::Matcher;
+use crate::utils::files::get_bytes_from_os_string;
+use crate::utils::hg_path::HgPath;
+use crate::BadMatch;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::HgPathBuf;
+use crate::PatternFileWarning;
+use crate::StatusError;
+use crate::StatusOptions;
+use micro_timer::timed;
+use rayon::prelude::*;
+use std::borrow::Cow;
+use std::io;
+use std::path::Path;
+use std::path::PathBuf;
+use std::sync::Mutex;
+
+/// Returns the status of the working directory compared to its parent
+/// changeset.
+///
+/// This algorithm is based on traversing the filesystem tree (`fs` in function
+/// and variable names) and dirstate tree at the same time. The core of this
+/// traversal is the recursive `traverse_fs_directory_and_dirstate` function
+/// and its use of `itertools::merge_join_by`. When reaching a path that only
+/// exists in one of the two trees, depending on information requested by
+/// `options` we may need to traverse the remaining subtree.
+#[timed]
+pub fn status<'tree>(
+    dmap: &'tree mut DirstateMap,
+    matcher: &(dyn Matcher + Sync),
+    root_dir: PathBuf,
+    ignore_files: Vec<PathBuf>,
+    options: StatusOptions,
+) -> Result<(DirstateStatus<'tree>, Vec<PatternFileWarning>), StatusError> {
+    let (ignore_fn, warnings): (IgnoreFnType, _) =
+        if options.list_ignored || options.list_unknown {
+            get_ignore_function(ignore_files, &root_dir)?
+        } else {
+            (Box::new(|&_| true), vec![])
+        };
+
+    let common = StatusCommon {
+        options,
+        matcher,
+        ignore_fn,
+        outcome: Mutex::new(DirstateStatus::default()),
+    };
+    let is_at_repo_root = true;
+    let hg_path = HgPath::new("");
+    let has_ignored_ancestor = false;
+    common.traverse_fs_directory_and_dirstate(
+        has_ignored_ancestor,
+        &mut dmap.root,
+        hg_path,
+        &root_dir,
+        is_at_repo_root,
+    );
+    Ok((common.outcome.into_inner().unwrap(), warnings))
+}
+
+/// Bag of random things needed by various parts of the algorithm. Reduces the
+/// number of parameters passed to functions.
+struct StatusCommon<'tree, 'a> {
+    options: StatusOptions,
+    matcher: &'a (dyn Matcher + Sync),
+    ignore_fn: IgnoreFnType<'a>,
+    outcome: Mutex<DirstateStatus<'tree>>,
+}
+
+impl<'tree, 'a> StatusCommon<'tree, 'a> {
+    fn read_dir(
+        &self,
+        hg_path: &HgPath,
+        fs_path: &Path,
+        is_at_repo_root: bool,
+    ) -> Result<Vec<DirEntry>, ()> {
+        DirEntry::read_dir(fs_path, is_at_repo_root).map_err(|error| {
+            let errno = error.raw_os_error().expect("expected real OS error");
+            self.outcome
+                .lock()
+                .unwrap()
+                .bad
+                .push((hg_path.to_owned().into(), BadMatch::OsError(errno)))
+        })
+    }
+
+    fn traverse_fs_directory_and_dirstate(
+        &self,
+        has_ignored_ancestor: bool,
+        dirstate_nodes: &'tree mut ChildNodes,
+        directory_hg_path: &'tree HgPath,
+        directory_fs_path: &Path,
+        is_at_repo_root: bool,
+    ) {
+        let mut fs_entries = if let Ok(entries) = self.read_dir(
+            directory_hg_path,
+            directory_fs_path,
+            is_at_repo_root,
+        ) {
+            entries
+        } else {
+            return;
+        };
+
+        // `merge_join_by` requires both its input iterators to be sorted:
+
+        let mut dirstate_nodes: Vec<_> = dirstate_nodes.iter_mut().collect();
+        // `sort_unstable_by_key` doesn’t allow keys that borrow from the value:
+        // https://github.com/rust-lang/rust/issues/34162
+        dirstate_nodes
+            .sort_unstable_by(|(path1, _), (path2, _)| path1.cmp(path2));
+        fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
+
+        itertools::merge_join_by(
+            dirstate_nodes,
+            &fs_entries,
+            |(full_path, _node), fs_entry| {
+                full_path.base_name().cmp(&fs_entry.base_name)
+            },
+        )
+        .par_bridge()
+        .for_each(|pair| {
+            use itertools::EitherOrBoth::*;
+            match pair {
+                Both((hg_path, dirstate_node), fs_entry) => {
+                    self.traverse_fs_and_dirstate(
+                        fs_entry,
+                        hg_path.full_path(),
+                        dirstate_node,
+                        has_ignored_ancestor,
+                    );
+                }
+                Left((hg_path, dirstate_node)) => self.traverse_dirstate_only(
+                    hg_path.full_path(),
+                    dirstate_node,
+                ),
+                Right(fs_entry) => self.traverse_fs_only(
+                    has_ignored_ancestor,
+                    directory_hg_path,
+                    fs_entry,
+                ),
+            }
+        })
+    }
+
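The match above is driven by `itertools::merge_join_by`, which walks two sorted sequences in lockstep and reports, for each name, whether it exists only in the dirstate, only on the filesystem, or in both. A minimal standalone sketch (not part of this patch, using plain `&str` names) of how the output variants map onto the three traversal cases:

    use itertools::EitherOrBoth::{Both, Left, Right};

    fn main() {
        let dirstate = ["a", "b", "d"]; // sorted dirstate base names
        let filesystem = ["b", "c", "d"]; // sorted directory entries
        for pair in itertools::merge_join_by(dirstate, filesystem, |l, r| l.cmp(r)) {
            match pair {
                // Present in both trees: compare the dirstate entry with
                // the on-disk metadata.
                Both(name, _) => println!("{}: traverse fs and dirstate", name),
                // Only in the dirstate: the file was removed or deleted.
                Left(name) => println!("{}: traverse dirstate only", name),
                // Only on disk: the file is unknown or ignored.
                Right(name) => println!("{}: traverse fs only", name),
            }
        }
    }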
+    fn traverse_fs_and_dirstate(
+        &self,
+        fs_entry: &DirEntry,
+        hg_path: &'tree HgPath,
+        dirstate_node: &'tree mut Node,
+        has_ignored_ancestor: bool,
+    ) {
+        let file_type = fs_entry.metadata.file_type();
+        let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+        if !file_or_symlink {
+            // If we previously had a file here, it was removed (with
+            // `hg rm` or similar) or deleted before it could be
+            // replaced by a directory or something else.
+            self.mark_removed_or_deleted_if_file(
+                hg_path,
+                dirstate_node.state(),
+            );
+        }
+        if file_type.is_dir() {
+            if self.options.collect_traversed_dirs {
+                self.outcome.lock().unwrap().traversed.push(hg_path.into())
+            }
+            let is_ignored = has_ignored_ancestor || (self.ignore_fn)(hg_path);
+            let is_at_repo_root = false;
+            self.traverse_fs_directory_and_dirstate(
+                is_ignored,
+                &mut dirstate_node.children,
+                hg_path,
+                &fs_entry.full_path,
+                is_at_repo_root,
+            );
+        } else {
+            if file_or_symlink && self.matcher.matches(hg_path) {
+                let full_path = Cow::from(hg_path);
+                if let Some(entry) = &dirstate_node.entry {
+                    match entry.state {
+                        EntryState::Added => {
+                            self.outcome.lock().unwrap().added.push(full_path)
+                        }
+                        EntryState::Removed => self
+                            .outcome
+                            .lock()
+                            .unwrap()
+                            .removed
+                            .push(full_path),
+                        EntryState::Merged => self
+                            .outcome
+                            .lock()
+                            .unwrap()
+                            .modified
+                            .push(full_path),
+                        EntryState::Normal => {
+                            self.handle_normal_file(
+                                full_path,
+                                dirstate_node,
+                                entry,
+                                fs_entry,
+                            );
+                        }
+                        // This variant is not used in DirstateMap
+                        // nodes
+                        EntryState::Unknown => unreachable!(),
+                    }
+                } else {
+                    // `node.entry.is_none()` indicates a "directory"
+                    // node, but the filesystem has a file
+                    self.mark_unknown_or_ignored(
+                        has_ignored_ancestor,
+                        full_path,
+                    )
+                }
+            }
+
+            for (child_hg_path, child_node) in &mut dirstate_node.children {
+                self.traverse_dirstate_only(
+                    child_hg_path.full_path(),
+                    child_node,
+                )
+            }
+        }
+    }
+
+    /// A file with `EntryState::Normal` in the dirstate was found in the
+    /// filesystem
+    fn handle_normal_file(
+        &self,
+        full_path: Cow<'tree, HgPath>,
+        dirstate_node: &Node,
+        entry: &crate::DirstateEntry,
+        fs_entry: &DirEntry,
+    ) {
+        // Keep the low 31 bits
+        fn truncate_u64(value: u64) -> i32 {
+            (value & 0x7FFF_FFFF) as i32
+        }
+        fn truncate_i64(value: i64) -> i32 {
+            (value & 0x7FFF_FFFF) as i32
+        }
+
+        let mode_changed = || {
+            self.options.check_exec && entry.mode_changed(&fs_entry.metadata)
+        };
+        let size_changed = entry.size != truncate_u64(fs_entry.metadata.len());
+        if entry.size >= 0
+            && size_changed
+            && fs_entry.metadata.file_type().is_symlink()
+        {
+            // issue6456: the size returned may be longer due to encryption
+            // on EXT4 fscrypt. TODO: maybe only do this on EXT4?
+            self.outcome.lock().unwrap().unsure.push(full_path)
+        } else if dirstate_node.copy_source.is_some()
+            || entry.is_from_other_parent()
+            || (entry.size >= 0 && (size_changed || mode_changed()))
+        {
+            self.outcome.lock().unwrap().modified.push(full_path)
+        } else {
+            let mtime = mtime_seconds(&fs_entry.metadata);
+            if truncate_i64(mtime) != entry.mtime
+                || mtime == self.options.last_normal_time
+            {
+                self.outcome.lock().unwrap().unsure.push(full_path)
+            } else if self.options.list_clean {
+                self.outcome.lock().unwrap().clean.push(full_path)
+            }
+        }
+    }
+
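The comparison in `handle_normal_file` truncates filesystem values to 31 bits because dirstate entries store size and mtime in the low 31 bits of an `i32`. A small illustrative sketch (not part of this patch) of the wrap-around behaviour this implies:

    fn truncate_u64(value: u64) -> i32 {
        (value & 0x7FFF_FFFF) as i32
    }

    fn main() {
        // Small values pass through unchanged.
        assert_eq!(truncate_u64(5), 5);
        // 2^31 wraps to 0: only the low 31 bits take part in the comparison,
        // so a file of 2 GiB + 3 bytes compares equal to a recorded size of 3.
        assert_eq!(truncate_u64(0x8000_0003), 3);
    }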
+    /// A node in the dirstate tree has no corresponding filesystem entry
+    fn traverse_dirstate_only(
+        &self,
+        hg_path: &'tree HgPath,
+        dirstate_node: &'tree mut Node,
+    ) {
+        self.mark_removed_or_deleted_if_file(hg_path, dirstate_node.state());
+        dirstate_node.children.par_iter_mut().for_each(
+            |(child_hg_path, child_node)| {
+                self.traverse_dirstate_only(
+                    child_hg_path.full_path(),
+                    child_node,
+                )
+            },
+        )
+    }
+
+    /// A node in the dirstate tree has no corresponding *file* on the
+    /// filesystem
+    ///
+    /// Does nothing on a "directory" node
+    fn mark_removed_or_deleted_if_file(
+        &self,
+        hg_path: &'tree HgPath,
+        dirstate_node_state: Option<EntryState>,
+    ) {
+        if let Some(state) = dirstate_node_state {
+            if self.matcher.matches(hg_path) {
+                if let EntryState::Removed = state {
+                    self.outcome.lock().unwrap().removed.push(hg_path.into())
+                } else {
+                    self.outcome.lock().unwrap().deleted.push(hg_path.into())
+                }
+            }
+        }
+    }
+
+    /// Something in the filesystem has no corresponding dirstate node
+    fn traverse_fs_only(
+        &self,
+        has_ignored_ancestor: bool,
+        directory_hg_path: &HgPath,
+        fs_entry: &DirEntry,
+    ) {
+        let hg_path = directory_hg_path.join(&fs_entry.base_name);
+        let file_type = fs_entry.metadata.file_type();
+        let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+        if file_type.is_dir() {
+            let is_ignored =
+                has_ignored_ancestor || (self.ignore_fn)(&hg_path);
+            let traverse_children = if is_ignored {
+                // Descendants of an ignored directory are all ignored
+                self.options.list_ignored
+            } else {
+                // Descendants of an unknown directory may be either unknown or
+                // ignored
+                self.options.list_unknown || self.options.list_ignored
+            };
+            if traverse_children {
+                let is_at_repo_root = false;
+                if let Ok(children_fs_entries) = self.read_dir(
+                    &hg_path,
+                    &fs_entry.full_path,
+                    is_at_repo_root,
+                ) {
+                    children_fs_entries.par_iter().for_each(|child_fs_entry| {
+                        self.traverse_fs_only(
+                            is_ignored,
+                            &hg_path,
+                            child_fs_entry,
+                        )
+                    })
+                }
+            }
+            if self.options.collect_traversed_dirs {
+                self.outcome.lock().unwrap().traversed.push(hg_path.into())
+            }
+        } else if file_or_symlink && self.matcher.matches(&hg_path) {
+            self.mark_unknown_or_ignored(has_ignored_ancestor, hg_path.into())
+        }
+    }
+
+    fn mark_unknown_or_ignored(
+        &self,
+        has_ignored_ancestor: bool,
+        hg_path: Cow<'tree, HgPath>,
+    ) {
+        let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
+        if is_ignored {
+            if self.options.list_ignored {
+                self.outcome.lock().unwrap().ignored.push(hg_path)
+            }
+        } else {
+            if self.options.list_unknown {
+                self.outcome.lock().unwrap().unknown.push(hg_path)
+            }
+        }
+    }
+}
+
+#[cfg(unix)] // TODO
+fn mtime_seconds(metadata: &std::fs::Metadata) -> i64 {
+    // Going through `Metadata::modified()` would be portable, but would do
+    // the extra work of constructing a `SystemTime` value with sub-second
+    // precision just for us to throw that precision away here.
+    use std::os::unix::fs::MetadataExt;
+    metadata.mtime()
+}
+
+struct DirEntry {
+    base_name: HgPathBuf,
+    full_path: PathBuf,
+    metadata: std::fs::Metadata,
+}
+
+impl DirEntry {
+    /// Returns **unsorted** entries in the given directory, with name and
+    /// metadata.
+    ///
+    /// If a `.hg` sub-directory is encountered:
+    ///
+    /// * At the repository root, ignore that sub-directory
+    /// * Elsewhere, we’re listing the contents of a sub-repo: return an
+    ///   empty list instead.
+    fn read_dir(path: &Path, is_at_repo_root: bool) -> io::Result<Vec<Self>> {
+        let mut results = Vec::new();
+        for entry in path.read_dir()? {
+            let entry = entry?;
+            let metadata = entry.metadata()?;
+            let name = get_bytes_from_os_string(entry.file_name());
+            // FIXME don't do this when cached
+            if name == b".hg" {
+                if is_at_repo_root {
+                    // Skip the repo’s own .hg (might be a symlink)
+                    continue;
+                } else if metadata.is_dir() {
+                    // A .hg sub-directory at another location means a subrepo;
+                    // skip it entirely.
+                    return Ok(Vec::new());
+                }
+            }
+            results.push(DirEntry {
+                base_name: name.into(),
+                full_path: entry.path(),
+                metadata,
+            })
+        }
+        Ok(results)
+    }
+}
--- a/rust/hg-core/src/lib.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/lib.rs	Mon May 17 15:05:24 2021 +0200
@@ -8,7 +8,8 @@
 pub mod dagops;
 pub mod errors;
 pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
-mod dirstate;
+pub mod dirstate;
+pub mod dirstate_tree;
 pub mod discovery;
 pub mod requirements;
 pub mod testing; // unconditionally built, for use from integration tests
--- a/rust/hg-core/src/operations/dirstate_status.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/operations/dirstate_status.rs	Mon May 17 15:05:24 2021 +0200
@@ -5,17 +5,12 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
-use crate::dirstate::status::{build_response, Dispatch, HgPathCow, Status};
+use crate::dirstate::status::{build_response, Dispatch, Status};
 use crate::matchers::Matcher;
 use crate::{DirstateStatus, StatusError};
 
-/// A tuple of the paths that need to be checked in the filelog because it's
-/// ambiguous whether they've changed, and the rest of the already dispatched
-/// files.
-pub type LookupAndStatus<'a> = (Vec<HgPathCow<'a>>, DirstateStatus<'a>);
-
-impl<'a, M: Matcher + Sync> Status<'a, M> {
-    pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
+impl<'a, M: ?Sized + Matcher + Sync> Status<'a, M> {
+    pub(crate) fn run(&self) -> Result<DirstateStatus<'a>, StatusError> {
         let (traversed_sender, traversed_receiver) =
             crossbeam_channel::unbounded();
 
--- a/rust/hg-core/src/requirements.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/requirements.rs	Mon May 17 15:05:24 2021 +0200
@@ -124,11 +124,6 @@
 #[allow(unused)]
 pub(crate) const SPARSEREVLOG_REQUIREMENT: &str = "sparserevlog";
 
-/// A repository with the sidedataflag requirement will allow to store extra
-/// information for revision without altering their original hashes.
-#[allow(unused)]
-pub(crate) const SIDEDATA_REQUIREMENT: &str = "exp-sidedata-flag";
-
 /// A repository with the copies-sidedata-changeset requirement will store
 /// copies related information in changeset's sidedata.
 #[allow(unused)]
--- a/rust/hg-core/src/utils/files.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/utils/files.rs	Mon May 17 15:05:24 2021 +0200
@@ -17,7 +17,7 @@
 use lazy_static::lazy_static;
 use same_file::is_same_file;
 use std::borrow::{Cow, ToOwned};
-use std::ffi::OsStr;
+use std::ffi::{OsStr, OsString};
 use std::fs::Metadata;
 use std::iter::FusedIterator;
 use std::ops::Deref;
@@ -53,6 +53,12 @@
     str.as_ref().as_bytes().to_vec()
 }
 
+#[cfg(unix)]
+pub fn get_bytes_from_os_string(str: OsString) -> Vec<u8> {
+    use std::os::unix::ffi::OsStringExt;
+    str.into_vec()
+}
+
 /// An iterator over repository path yielding itself and its ancestors.
 #[derive(Copy, Clone, Debug)]
 pub struct Ancestors<'a> {
--- a/rust/hg-core/src/utils/hg_path.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-core/src/utils/hg_path.rs	Mon May 17 15:05:24 2021 +0200
@@ -5,7 +5,9 @@
 // This software may be used and distributed according to the terms of the
 // GNU General Public License version 2 or any later version.
 
+use crate::utils::SliceExt;
 use std::borrow::Borrow;
+use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::ffi::{OsStr, OsString};
 use std::fmt;
@@ -226,6 +228,20 @@
         inner.extend(other.as_ref().bytes());
         HgPathBuf::from_bytes(&inner)
     }
+
+    pub fn components(&self) -> impl Iterator<Item = &HgPath> {
+        self.inner.split(|&byte| byte == b'/').map(HgPath::new)
+    }
+
+    /// Returns the first (that is "root-most") slash-separated component of
+    /// the path, and the rest after the first slash if there is one.
+    pub fn split_first_component(&self) -> (&HgPath, Option<&HgPath>) {
+        match self.inner.split_2(b'/') {
+            Some((a, b)) => (HgPath::new(a), Some(HgPath::new(b))),
+            None => (self, None),
+        }
+    }
+
     pub fn parent(&self) -> &Self {
         let inner = self.as_bytes();
         HgPath::new(match inner.iter().rposition(|b| *b == b'/') {
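A short usage sketch (assuming the `components` and `split_first_component` methods added above, together with the existing `HgPath::new` constructor):

    use hg::utils::hg_path::HgPath;

    fn main() {
        let path = HgPath::new("a/bb/c");

        // `components` yields each slash-separated piece in turn.
        let parts: Vec<&HgPath> = path.components().collect();
        assert_eq!(
            parts,
            vec![HgPath::new("a"), HgPath::new("bb"), HgPath::new("c")]
        );

        // `split_first_component` peels off the root-most component.
        let (first, rest) = path.split_first_component();
        assert_eq!(first, HgPath::new("a"));
        assert_eq!(rest, Some(HgPath::new("bb/c")));
    }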
@@ -530,6 +546,24 @@
     }
 }
 
+impl From<HgPathBuf> for Cow<'_, HgPath> {
+    fn from(path: HgPathBuf) -> Self {
+        Cow::Owned(path)
+    }
+}
+
+impl<'a> From<&'a HgPath> for Cow<'a, HgPath> {
+    fn from(path: &'a HgPath) -> Self {
+        Cow::Borrowed(path)
+    }
+}
+
+impl<'a> From<&'a HgPathBuf> for Cow<'a, HgPath> {
+    fn from(path: &'a HgPathBuf) -> Self {
+        Cow::Borrowed(&**path)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
--- a/rust/hg-cpython/src/dirstate.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-cpython/src/dirstate.rs	Mon May 17 15:05:24 2021 +0200
@@ -12,7 +12,9 @@
 mod copymap;
 mod dirs_multiset;
 mod dirstate_map;
+mod dispatch;
 mod non_normal_entries;
+mod owning;
 mod status;
 use crate::{
     dirstate::{
--- a/rust/hg-cpython/src/dirstate/copymap.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/copymap.rs	Mon May 17 15:05:24 2021 +0200
@@ -14,7 +14,8 @@
 use std::cell::RefCell;
 
 use crate::dirstate::dirstate_map::DirstateMap;
-use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
+use hg::utils::hg_path::HgPath;
+use hg::CopyMapIter;
 
 py_class!(pub class CopyMap |py| {
     data dirstate_map: DirstateMap;
@@ -87,13 +88,13 @@
     }
     fn translate_key(
         py: Python,
-        res: (&HgPathBuf, &HgPathBuf),
+        res: (&HgPath, &HgPath),
     ) -> PyResult<Option<PyBytes>> {
         Ok(Some(PyBytes::new(py, res.0.as_bytes())))
     }
     fn translate_key_value(
         py: Python,
-        res: (&HgPathBuf, &HgPathBuf),
+        res: (&HgPath, &HgPath),
     ) -> PyResult<Option<(PyBytes, PyBytes)>> {
         let (k, v) = res;
         Ok(Some((
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs	Mon May 17 15:05:24 2021 +0200
@@ -8,14 +8,13 @@
 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
 //! `hg-core` package.
 
-use std::cell::{Ref, RefCell};
+use std::cell::{RefCell, RefMut};
 use std::convert::TryInto;
-use std::time::Duration;
 
 use cpython::{
     exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
-    PyObject, PyResult, PySet, PyString, PyTuple, Python, PythonObject,
-    ToPyObject, UnsafePyLeaked,
+    PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
+    UnsafePyLeaked,
 };
 
 use crate::{
@@ -23,15 +22,20 @@
     dirstate::non_normal_entries::{
         NonNormalEntries, NonNormalEntriesIterator,
     },
+    dirstate::owning::OwningDirstateMap,
     dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
     parsers::dirstate_parents_to_pytuple,
 };
 use hg::{
+    dirstate::parsers::Timestamp,
+    dirstate_tree::dispatch::DirstateMapMethods,
     errors::HgError,
     revlog::Node,
+    utils::files::normalize_case,
     utils::hg_path::{HgPath, HgPathBuf},
-    DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap,
-    DirstateMapError, DirstateParents, EntryState, StateMapIter,
+    DirsMultiset, DirstateEntry, DirstateError,
+    DirstateMap as RustDirstateMap, DirstateMapError, DirstateParents,
+    EntryState, StateMapIter,
 };
 
 // TODO
@@ -47,11 +51,28 @@
 //     All attributes also have to have a separate refcount data attribute for
 //     leaks, with all methods that go along for reference sharing.
 py_class!(pub class DirstateMap |py| {
-    @shared data inner: RustDirstateMap;
+    @shared data inner: Box<dyn DirstateMapMethods + Send>;
 
-    def __new__(_cls, _root: PyObject) -> PyResult<Self> {
-        let inner = RustDirstateMap::default();
-        Self::create_instance(py, inner)
+    /// Returns a `(dirstate_map, parents)` tuple
+    @staticmethod
+    def new(use_dirstate_tree: bool, on_disk: PyBytes) -> PyResult<PyObject> {
+        let dirstate_error = |_: DirstateError| {
+            PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string())
+        };
+        let (inner, parents) = if use_dirstate_tree {
+            let (map, parents) =
+                OwningDirstateMap::new(py, on_disk)
+                .map_err(dirstate_error)?;
+            (Box::new(map) as _, parents)
+        } else {
+            let bytes = on_disk.data(py);
+            let mut map = RustDirstateMap::default();
+            let parents = map.read(bytes).map_err(dirstate_error)?;
+            (Box::new(map) as _, parents)
+        };
+        let map = Self::create_instance(py, inner)?;
+        let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
+        Ok((map, parents).to_py_object(py).into_object())
     }
 
     def clear(&self) -> PyResult<PyObject> {
@@ -172,11 +193,8 @@
 
     def other_parent_entries(&self) -> PyResult<PyObject> {
         let mut inner_shared = self.inner(py).borrow_mut();
-        let (_, other_parent) =
-            inner_shared.get_non_normal_other_parent_entries();
-
         let set = PySet::empty(py)?;
-        for path in other_parent.iter() {
+        for path in inner_shared.iter_other_parent_paths() {
             set.add(py, PyBytes::new(py, path.as_bytes()))?;
         }
         Ok(set.into_object())
@@ -191,8 +209,7 @@
         Ok(self
             .inner(py)
             .borrow_mut()
-            .get_non_normal_other_parent_entries().0
-            .contains(HgPath::new(key.data(py))))
+            .non_normal_entries_contains(HgPath::new(key.data(py))))
     }
 
     def non_normal_entries_display(&self) -> PyResult<PyString> {
@@ -200,14 +217,17 @@
             PyString::new(
                 py,
                 &format!(
-                    "NonNormalEntries: {:?}",
-                    self
-                        .inner(py)
-                        .borrow_mut()
-                        .get_non_normal_other_parent_entries().0
-                        .iter().map(|o| o))
+                    "NonNormalEntries: {}",
+                    hg::utils::join_display(
+                        self
+                            .inner(py)
+                            .borrow_mut()
+                            .iter_non_normal_paths(),
+                        ", "
+                    )
                 )
             )
+        )
     }
 
     def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
@@ -219,22 +239,11 @@
         Ok(py.None())
     }
 
-    def non_normal_entries_union(&self, other: PyObject) -> PyResult<PyList> {
-        let other: PyResult<_> = other.iter(py)?
-                    .map(|f| {
-                        Ok(HgPathBuf::from_bytes(
-                            f?.extract::<PyBytes>(py)?.data(py),
-                        ))
-                    })
-                    .collect();
-
-        let res = self
-            .inner(py)
-            .borrow_mut()
-            .non_normal_entries_union(other?);
+    def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
+        let mut inner = self.inner(py).borrow_mut();
 
         let ret = PyList::new(py, &[]);
-        for filename in res.iter() {
+        for filename in inner.non_normal_or_other_parent_paths() {
             let as_pystring = PyBytes::new(py, filename.as_bytes());
             ret.append(py, as_pystring.into_object());
         }
@@ -252,7 +261,7 @@
 
         NonNormalEntriesIterator::from_inner(py, unsafe {
             leaked_ref.map(py, |o| {
-                o.get_non_normal_other_parent_entries_panic().0.iter()
+                o.iter_non_normal_paths_panic()
             })
         })
     }
@@ -277,49 +286,13 @@
             .to_py_object(py))
     }
 
-    def parents(&self, st: PyObject) -> PyResult<PyTuple> {
-        self.inner(py).borrow_mut()
-            .parents(st.extract::<PyBytes>(py)?.data(py))
-            .map(|parents| dirstate_parents_to_pytuple(py, parents))
-            .or_else(|_| {
-                Err(PyErr::new::<exc::OSError, _>(
-                    py,
-                    "Dirstate error".to_string(),
-                ))
-            })
-    }
-
-    def setparents(&self, p1: PyObject, p2: PyObject) -> PyResult<PyObject> {
-        let p1 = extract_node_id(py, &p1)?;
-        let p2 = extract_node_id(py, &p2)?;
-
-        self.inner(py).borrow_mut()
-            .set_parents(&DirstateParents { p1, p2 });
-        Ok(py.None())
-    }
-
-    def read(&self, st: PyObject) -> PyResult<Option<PyObject>> {
-        match self.inner(py).borrow_mut()
-            .read(st.extract::<PyBytes>(py)?.data(py))
-        {
-            Ok(Some(parents)) => Ok(Some(
-                dirstate_parents_to_pytuple(py, parents)
-                    .into_object()
-            )),
-            Ok(None) => Ok(Some(py.None())),
-            Err(_) => Err(PyErr::new::<exc::OSError, _>(
-                py,
-                "Dirstate error".to_string(),
-            )),
-        }
-    }
     def write(
         &self,
         p1: PyObject,
         p2: PyObject,
         now: PyObject
     ) -> PyResult<PyBytes> {
-        let now = Duration::new(now.extract(py)?, 0);
+        let now = Timestamp(now.extract(py)?);
         let parents = DirstateParents {
             p1: extract_node_id(py, &p1)?,
             p2: extract_node_id(py, &p2)?,
@@ -336,14 +309,16 @@
 
     def filefoldmapasdict(&self) -> PyResult<PyDict> {
         let dict = PyDict::new(py);
-        for (key, value) in
-            self.inner(py).borrow_mut().build_file_fold_map().iter()
-        {
-            dict.set_item(
-                py,
-                PyBytes::new(py, key.as_bytes()).into_object(),
-                PyBytes::new(py, value.as_bytes()).into_object(),
-            )?;
+        for (path, entry) in self.inner(py).borrow_mut().iter() {
+            if entry.state != EntryState::Removed {
+                let key = normalize_case(path);
+                let value = path;
+                dict.set_item(
+                    py,
+                    PyBytes::new(py, key.as_bytes()).into_object(),
+                    PyBytes::new(py, value.as_bytes()).into_object(),
+                )?;
+            }
         }
         Ok(dict)
     }
@@ -404,7 +379,7 @@
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
-                &self.inner(py).borrow(),
+                self.inner(py).borrow().iter(),
                 Some(EntryState::Removed),
             )
             .map_err(|e| {
@@ -421,7 +396,7 @@
         Dirs::from_inner(
             py,
             DirsMultiset::from_dirstate(
-                &self.inner(py).borrow(),
+                self.inner(py).borrow().iter(),
                 None,
             ).map_err(|e| {
                 PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -432,7 +407,7 @@
     // TODO all copymap* methods, see docstring above
     def copymapcopy(&self) -> PyResult<PyDict> {
         let dict = PyDict::new(py);
-        for (key, value) in self.inner(py).borrow().copy_map.iter() {
+        for (key, value) in self.inner(py).borrow().copy_map_iter() {
             dict.set_item(
                 py,
                 PyBytes::new(py, key.as_bytes()),
@@ -444,7 +419,7 @@
 
     def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
         let key = key.extract::<PyBytes>(py)?;
-        match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
+        match self.inner(py).borrow().copy_map_get(HgPath::new(key.data(py))) {
             Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
             None => Err(PyErr::new::<exc::KeyError, _>(
                 py,
@@ -457,15 +432,14 @@
     }
 
     def copymaplen(&self) -> PyResult<usize> {
-        Ok(self.inner(py).borrow().copy_map.len())
+        Ok(self.inner(py).borrow().copy_map_len())
     }
     def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
         let key = key.extract::<PyBytes>(py)?;
         Ok(self
             .inner(py)
             .borrow()
-            .copy_map
-            .contains_key(HgPath::new(key.data(py))))
+            .copy_map_contains_key(HgPath::new(key.data(py))))
     }
     def copymapget(
         &self,
@@ -476,8 +450,7 @@
         match self
             .inner(py)
             .borrow()
-            .copy_map
-            .get(HgPath::new(key.data(py)))
+            .copy_map_get(HgPath::new(key.data(py)))
         {
             Some(copy) => Ok(Some(
                 PyBytes::new(py, copy.as_bytes()).into_object(),
@@ -492,7 +465,7 @@
     ) -> PyResult<PyObject> {
         let key = key.extract::<PyBytes>(py)?;
         let value = value.extract::<PyBytes>(py)?;
-        self.inner(py).borrow_mut().copy_map.insert(
+        self.inner(py).borrow_mut().copy_map_insert(
             HgPathBuf::from_bytes(key.data(py)),
             HgPathBuf::from_bytes(value.data(py)),
         );
@@ -507,8 +480,7 @@
         match self
             .inner(py)
             .borrow_mut()
-            .copy_map
-            .remove(HgPath::new(key.data(py)))
+            .copy_map_remove(HgPath::new(key.data(py)))
         {
             Some(_) => Ok(None),
             None => Ok(default),
@@ -519,7 +491,7 @@
         let leaked_ref = self.inner(py).leak_immutable();
         CopyMapKeysIterator::from_inner(
             py,
-            unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
         )
     }
 
@@ -527,28 +499,28 @@
         let leaked_ref = self.inner(py).leak_immutable();
         CopyMapItemsIterator::from_inner(
             py,
-            unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+            unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
         )
     }
 
 });
 
 impl DirstateMap {
-    pub fn get_inner<'a>(
+    pub fn get_inner_mut<'a>(
         &'a self,
         py: Python<'a>,
-    ) -> Ref<'a, RustDirstateMap> {
-        self.inner(py).borrow()
+    ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
+        self.inner(py).borrow_mut()
     }
     fn translate_key(
         py: Python,
-        res: (&HgPathBuf, &DirstateEntry),
+        res: (&HgPath, &DirstateEntry),
     ) -> PyResult<Option<PyBytes>> {
         Ok(Some(PyBytes::new(py, res.0.as_bytes())))
     }
     fn translate_key_value(
         py: Python,
-        res: (&HgPathBuf, &DirstateEntry),
+        res: (&HgPath, &DirstateEntry),
     ) -> PyResult<Option<(PyBytes, PyObject)>> {
         let (f, entry) = res;
         Ok(Some((
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/dispatch.rs	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,175 @@
+use crate::dirstate::owning::OwningDirstateMap;
+use hg::dirstate::parsers::Timestamp;
+use hg::dirstate_tree::dispatch::DirstateMapMethods;
+use hg::matchers::Matcher;
+use hg::utils::hg_path::{HgPath, HgPathBuf};
+use hg::CopyMapIter;
+use hg::DirstateEntry;
+use hg::DirstateError;
+use hg::DirstateMapError;
+use hg::DirstateParents;
+use hg::DirstateStatus;
+use hg::EntryState;
+use hg::PatternFileWarning;
+use hg::StateMapIter;
+use hg::StatusError;
+use hg::StatusOptions;
+use std::path::PathBuf;
+
+impl DirstateMapMethods for OwningDirstateMap {
+    fn clear(&mut self) {
+        self.get_mut().clear()
+    }
+
+    fn add_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        entry: DirstateEntry,
+    ) -> Result<(), DirstateMapError> {
+        self.get_mut().add_file(filename, old_state, entry)
+    }
+
+    fn remove_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+        size: i32,
+    ) -> Result<(), DirstateMapError> {
+        self.get_mut().remove_file(filename, old_state, size)
+    }
+
+    fn drop_file(
+        &mut self,
+        filename: &HgPath,
+        old_state: EntryState,
+    ) -> Result<bool, DirstateMapError> {
+        self.get_mut().drop_file(filename, old_state)
+    }
+
+    fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32) {
+        self.get_mut().clear_ambiguous_times(filenames, now)
+    }
+
+    fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool {
+        self.get_mut().non_normal_entries_contains(key)
+    }
+
+    fn non_normal_entries_remove(&mut self, key: &HgPath) {
+        self.get_mut().non_normal_entries_remove(key)
+    }
+
+    fn non_normal_or_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + '_> {
+        self.get_mut().non_normal_or_other_parent_paths()
+    }
+
+    fn set_non_normal_other_parent_entries(&mut self, force: bool) {
+        self.get_mut().set_non_normal_other_parent_entries(force)
+    }
+
+    fn iter_non_normal_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        self.get_mut().iter_non_normal_paths()
+    }
+
+    fn iter_non_normal_paths_panic(
+        &self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        self.get().iter_non_normal_paths_panic()
+    }
+
+    fn iter_other_parent_paths(
+        &mut self,
+    ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+        self.get_mut().iter_other_parent_paths()
+    }
+
+    fn has_tracked_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        self.get_mut().has_tracked_dir(directory)
+    }
+
+    fn has_dir(
+        &mut self,
+        directory: &HgPath,
+    ) -> Result<bool, DirstateMapError> {
+        self.get_mut().has_dir(directory)
+    }
+
+    fn pack(
+        &mut self,
+        parents: DirstateParents,
+        now: Timestamp,
+    ) -> Result<Vec<u8>, DirstateError> {
+        self.get_mut().pack(parents, now)
+    }
+
+    fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+        self.get_mut().set_all_dirs()
+    }
+
+    fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+        self.get_mut().set_dirs()
+    }
+
+    fn status<'a>(
+        &'a mut self,
+        matcher: &'a (dyn Matcher + Sync),
+        root_dir: PathBuf,
+        ignore_files: Vec<PathBuf>,
+        options: StatusOptions,
+    ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+    {
+        self.get_mut()
+            .status(matcher, root_dir, ignore_files, options)
+    }
+
+    fn copy_map_len(&self) -> usize {
+        self.get().copy_map_len()
+    }
+
+    fn copy_map_iter(&self) -> CopyMapIter<'_> {
+        self.get().copy_map_iter()
+    }
+
+    fn copy_map_contains_key(&self, key: &HgPath) -> bool {
+        self.get().copy_map_contains_key(key)
+    }
+
+    fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath> {
+        self.get().copy_map_get(key)
+    }
+
+    fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf> {
+        self.get_mut().copy_map_remove(key)
+    }
+
+    fn copy_map_insert(
+        &mut self,
+        key: HgPathBuf,
+        value: HgPathBuf,
+    ) -> Option<HgPathBuf> {
+        self.get_mut().copy_map_insert(key, value)
+    }
+
+    fn len(&self) -> usize {
+        self.get().len()
+    }
+
+    fn contains_key(&self, key: &HgPath) -> bool {
+        self.get().contains_key(key)
+    }
+
+    fn get(&self, key: &HgPath) -> Option<&DirstateEntry> {
+        self.get().get(key)
+    }
+
+    fn iter(&self) -> StateMapIter<'_> {
+        self.get().iter()
+    }
+}
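Everything in this file is plain delegation: the owning wrapper satisfies the same trait as the map it wraps by forwarding each call through `get()` or `get_mut()`. A reduced sketch of the pattern with hypothetical names (not the real hg-core API):

    trait MapMethods {
        fn len(&self) -> usize;
        fn clear(&mut self);
    }

    struct BorrowedMap(Vec<u8>);

    impl MapMethods for BorrowedMap {
        fn len(&self) -> usize { self.0.len() }
        fn clear(&mut self) { self.0.clear() }
    }

    struct OwningMap { inner: BorrowedMap }

    impl OwningMap {
        fn get(&self) -> &BorrowedMap { &self.inner }
        fn get_mut(&mut self) -> &mut BorrowedMap { &mut self.inner }
    }

    // Because the wrapper implements the same trait, a `Box<dyn MapMethods>`
    // can hold either representation; this mirrors how `DirstateMap` in
    // dirstate_map.rs stores a `Box<dyn DirstateMapMethods + Send>`.
    impl MapMethods for OwningMap {
        fn len(&self) -> usize { self.get().len() }
        fn clear(&mut self) { self.get_mut().clear() }
    }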
--- a/rust/hg-cpython/src/dirstate/non_normal_entries.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/non_normal_entries.rs	Mon May 17 15:05:24 2021 +0200
@@ -7,14 +7,13 @@
 
 use cpython::{
     exc::NotImplementedError, CompareOp, ObjectProtocol, PyBytes, PyClone,
-    PyErr, PyList, PyObject, PyResult, PyString, Python, PythonObject,
-    ToPyObject, UnsafePyLeaked,
+    PyErr, PyObject, PyResult, PyString, Python, PythonObject, ToPyObject,
+    UnsafePyLeaked,
 };
 
 use crate::dirstate::DirstateMap;
-use hg::utils::hg_path::HgPathBuf;
+use hg::utils::hg_path::HgPath;
 use std::cell::RefCell;
-use std::collections::hash_set;
 
 py_class!(pub class NonNormalEntries |py| {
     data dmap: DirstateMap;
@@ -25,9 +24,6 @@
     def remove(&self, key: PyObject) -> PyResult<PyObject> {
         self.dmap(py).non_normal_entries_remove(py, key)
     }
-    def union(&self, other: PyObject) -> PyResult<PyList> {
-        self.dmap(py).non_normal_entries_union(py, other)
-    }
     def __richcmp__(&self, other: PyObject, op: CompareOp) -> PyResult<bool> {
         match op {
             CompareOp::Eq => self.is_equal_to(py, other),
@@ -58,15 +54,13 @@
         Ok(true)
     }
 
-    fn translate_key(
-        py: Python,
-        key: &HgPathBuf,
-    ) -> PyResult<Option<PyBytes>> {
+    fn translate_key(py: Python, key: &HgPath) -> PyResult<Option<PyBytes>> {
         Ok(Some(PyBytes::new(py, key.as_bytes())))
     }
 }
 
-type NonNormalEntriesIter<'a> = hash_set::Iter<'a, HgPathBuf>;
+type NonNormalEntriesIter<'a> =
+    Box<dyn Iterator<Item = &'a HgPath> + Send + 'a>;
 
 py_shared_iterator!(
     NonNormalEntriesIterator,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/owning.rs	Mon May 17 15:05:24 2021 +0200
@@ -0,0 +1,97 @@
+use cpython::PyBytes;
+use cpython::Python;
+use hg::dirstate_tree::dirstate_map::DirstateMap;
+use hg::DirstateError;
+use hg::DirstateParents;
+
+/// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
+/// borrows. This is similar to [`OwningRef`] from the owning-ref crate,
+/// except that `OwningRef` is more limited: it represents exactly one `&T`
+/// reference next to the value it borrows, whereas this struct may contain
+/// an arbitrary number of references in arbitrarily-nested data structures.
+///
+/// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
+pub(super) struct OwningDirstateMap {
+    /// Owned handle to a bytes buffer with a stable address.
+    ///
+    /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
+    on_disk: PyBytes,
+
+    /// Pointer to a `Box<DirstateMap<'on_disk>>`, type-erased because the
+    /// language cannot represent a lifetime referencing a sibling field.
+    /// This is not quite a self-referential struct (moving this struct is
+    /// not a problem as it doesn’t change the address of the bytes buffer
+    /// owned by `PyBytes`) but it touches similar borrow-checker limitations.
+    ptr: *mut (),
+}
+
+impl OwningDirstateMap {
+    pub fn new(
+        py: Python,
+        on_disk: PyBytes,
+    ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
+        let bytes: &'_ [u8] = on_disk.data(py);
+        let (map, parents) = DirstateMap::new(bytes)?;
+
+        // Like in `bytes` above, this `'_` lifetime parameter borrows from
+        // the bytes buffer owned by `on_disk`.
+        let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
+
+        // Erase the pointed type entirely in order to erase the lifetime.
+        let ptr: *mut () = ptr.cast();
+
+        Ok((Self { on_disk, ptr }, parents))
+    }
+
+    pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
+        // SAFETY: We cast the type-erased pointer back to the same type it had
+        // in `new`, except with a different lifetime parameter. This time we
+        // connect the lifetime to that of `self`. This cast is valid because
+        // `self` owns the same `PyBytes` whose buffer `DirstateMap`
+        // references. That buffer has a stable memory address because the byte
+        // string value of a `PyBytes` is immutable.
+        let ptr: *mut DirstateMap<'a> = self.ptr.cast();
+        // SAFETY: we dereference that pointer, connecting the lifetime of
+        // the new `&mut` to that of `self`. This is valid because the raw
+        // pointer is to a boxed value, and `self` owns that box.
+        unsafe { &mut *ptr }
+    }
+
+    pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
+        // SAFETY: same reasoning as in `get_mut` above.
+        let ptr: *mut DirstateMap<'a> = self.ptr.cast();
+        unsafe { &*ptr }
+    }
+}
+
+impl Drop for OwningDirstateMap {
+    fn drop(&mut self) {
+        // Silence a "field is never read" warning, and demonstrate that this
+        // value is still alive.
+        let _ = &self.on_disk;
+        // SAFETY: this cast is the same as in `get_mut`, and is valid for the
+        // same reason. `self.on_disk` still exists at this point, drop glue
+        // will drop it implicitly after this `drop` method returns.
+        let ptr: *mut DirstateMap<'_> = self.ptr.cast();
+        // SAFETY: `Box::from_raw` takes ownership of the box away from `self`.
+        // This is fine because drop glue does nothing for `*mut ()` and we’re
+        // in `drop`, so `get` and `get_mut` cannot be called again.
+        unsafe { drop(Box::from_raw(ptr)) }
+    }
+}
+
+fn _static_assert_is_send<T: Send>() {}
+
+fn _static_assert_fields_are_send() {
+    _static_assert_is_send::<PyBytes>();
+    _static_assert_is_send::<Box<DirstateMap<'_>>>();
+}
+
+// SAFETY: we don’t get this impl implicitly because `*mut ()` is `!Send`:
+// thread-safety of raw pointers is unknown in the general case. However this
+// particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
+// own. Since that `Box` and `PyBytes` are both `Send`, as shown above, it
+// is sound to mark this struct as `Send` too.
+unsafe impl Send for OwningDirstateMap {}
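The lifetime-erasure trick is not specific to `PyBytes`. A reduced, illustrative sketch with hypothetical names, under the same assumption of a stable buffer address (here a boxed slice, whose heap allocation never moves when the box itself is moved):

    struct Borrowing<'a> {
        first_byte: Option<&'a u8>,
    }

    struct OwningPair {
        buffer: Box<[u8]>, // stable address: the heap allocation never moves
        ptr: *mut (),      // really a `Box<Borrowing<'buffer>>`, lifetime-erased
    }

    impl OwningPair {
        fn new(buffer: Box<[u8]>) -> Self {
            let borrowing = Borrowing { first_byte: buffer.first() };
            // Erase the lifetime by erasing the pointed-to type.
            let ptr: *mut () = Box::into_raw(Box::new(borrowing)).cast();
            Self { buffer, ptr }
        }

        fn get(&self) -> &Borrowing<'_> {
            // SAFETY: reconnect the erased lifetime to `self`; the buffer
            // cannot move or be dropped while `self` is alive.
            unsafe { &*(self.ptr as *mut Borrowing<'_>) }
        }
    }

    impl Drop for OwningPair {
        fn drop(&mut self) {
            // SAFETY: reconstruct the box so the borrowing value is dropped
            // before `self.buffer` is.
            unsafe { drop(Box::from_raw(self.ptr as *mut Borrowing<'_>)) }
        }
    }

    fn main() {
        let pair = OwningPair::new(vec![1u8, 2, 3].into_boxed_slice());
        assert_eq!(pair.get().first_byte, Some(&1u8));
    }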
--- a/rust/hg-cpython/src/dirstate/status.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-cpython/src/dirstate/status.rs	Mon May 17 15:05:24 2021 +0200
@@ -17,7 +17,7 @@
 };
 use hg::{
     matchers::{AlwaysMatcher, FileMatcher, IncludeMatcher},
-    parse_pattern_syntax, status,
+    parse_pattern_syntax,
     utils::{
         files::{get_bytes_from_path, get_path_from_bytes},
         hg_path::{HgPath, HgPathBuf},
@@ -25,7 +25,7 @@
     BadMatch, DirstateStatus, IgnorePattern, PatternFileWarning, StatusError,
     StatusOptions,
 };
-use std::borrow::{Borrow, Cow};
+use std::borrow::Borrow;
 
 /// This will be useless once trait impls for collections are added to `PyBytes`
 /// upstream.
@@ -112,7 +112,7 @@
     let root_dir = get_path_from_bytes(bytes.data(py));
 
     let dmap: DirstateMap = dmap.to_py_object(py);
-    let dmap = dmap.get_inner(py);
+    let mut dmap = dmap.get_inner_mut(py);
 
     let ignore_files: PyResult<Vec<_>> = ignore_files
         .iter(py)
@@ -126,22 +126,22 @@
     match matcher.get_type(py).name(py).borrow() {
         "alwaysmatcher" => {
             let matcher = AlwaysMatcher;
-            let ((lookup, status_res), warnings) = status(
-                &dmap,
-                &matcher,
-                root_dir.to_path_buf(),
-                ignore_files,
-                StatusOptions {
-                    check_exec,
-                    last_normal_time,
-                    list_clean,
-                    list_ignored,
-                    list_unknown,
-                    collect_traversed_dirs,
-                },
-            )
-            .map_err(|e| handle_fallback(py, e))?;
-            build_response(py, lookup, status_res, warnings)
+            let (status_res, warnings) = dmap
+                .status(
+                    &matcher,
+                    root_dir.to_path_buf(),
+                    ignore_files,
+                    StatusOptions {
+                        check_exec,
+                        last_normal_time,
+                        list_clean,
+                        list_ignored,
+                        list_unknown,
+                        collect_traversed_dirs,
+                    },
+                )
+                .map_err(|e| handle_fallback(py, e))?;
+            build_response(py, status_res, warnings)
         }
         "exactmatcher" => {
             let files = matcher.call_method(
@@ -163,22 +163,22 @@
             let files = files?;
             let matcher = FileMatcher::new(files.as_ref())
                 .map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
-            let ((lookup, status_res), warnings) = status(
-                &dmap,
-                &matcher,
-                root_dir.to_path_buf(),
-                ignore_files,
-                StatusOptions {
-                    check_exec,
-                    last_normal_time,
-                    list_clean,
-                    list_ignored,
-                    list_unknown,
-                    collect_traversed_dirs,
-                },
-            )
-            .map_err(|e| handle_fallback(py, e))?;
-            build_response(py, lookup, status_res, warnings)
+            let (status_res, warnings) = dmap
+                .status(
+                    &matcher,
+                    root_dir.to_path_buf(),
+                    ignore_files,
+                    StatusOptions {
+                        check_exec,
+                        last_normal_time,
+                        list_clean,
+                        list_ignored,
+                        list_unknown,
+                        collect_traversed_dirs,
+                    },
+                )
+                .map_err(|e| handle_fallback(py, e))?;
+            build_response(py, status_res, warnings)
         }
         "includematcher" => {
             // Get the patterns from Python even though most of them are
@@ -218,25 +218,25 @@
                     .map_err(|e| handle_fallback(py, e.into()))?;
             all_warnings.extend(warnings);
 
-            let ((lookup, status_res), warnings) = status(
-                &dmap,
-                &matcher,
-                root_dir.to_path_buf(),
-                ignore_files,
-                StatusOptions {
-                    check_exec,
-                    last_normal_time,
-                    list_clean,
-                    list_ignored,
-                    list_unknown,
-                    collect_traversed_dirs,
-                },
-            )
-            .map_err(|e| handle_fallback(py, e))?;
+            let (status_res, warnings) = dmap
+                .status(
+                    &matcher,
+                    root_dir.to_path_buf(),
+                    ignore_files,
+                    StatusOptions {
+                        check_exec,
+                        last_normal_time,
+                        list_clean,
+                        list_ignored,
+                        list_unknown,
+                        collect_traversed_dirs,
+                    },
+                )
+                .map_err(|e| handle_fallback(py, e))?;
 
             all_warnings.extend(warnings);
 
-            build_response(py, lookup, status_res, all_warnings)
+            build_response(py, status_res, all_warnings)
         }
         e => Err(PyErr::new::<ValueError, _>(
             py,
@@ -247,7 +247,6 @@
 
 fn build_response(
     py: Python,
-    lookup: Vec<Cow<HgPath>>,
     status_res: DirstateStatus,
     warnings: Vec<PatternFileWarning>,
 ) -> PyResult<PyTuple> {
@@ -258,7 +257,7 @@
     let clean = collect_pybytes_list(py, status_res.clean.as_ref());
     let ignored = collect_pybytes_list(py, status_res.ignored.as_ref());
     let unknown = collect_pybytes_list(py, status_res.unknown.as_ref());
-    let lookup = collect_pybytes_list(py, lookup.as_ref());
+    let unsure = collect_pybytes_list(py, status_res.unsure.as_ref());
     let bad = collect_bad_matches(py, status_res.bad.as_ref())?;
     let traversed = collect_pybytes_list(py, status_res.traversed.as_ref());
     let py_warnings = PyList::new(py, &[]);
@@ -287,7 +286,7 @@
     Ok(PyTuple::new(
         py,
         &[
-            lookup.into_object(),
+            unsure.into_object(),
             modified.into_object(),
             added.into_object(),
             removed.into_object(),
--- a/rust/hg-cpython/src/parsers.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-cpython/src/parsers.rs	Mon May 17 15:05:24 2021 +0200
@@ -14,13 +14,13 @@
     PythonObject, ToPyObject,
 };
 use hg::{
-    pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry,
-    DirstateParents, FastHashMap, PARENT_SIZE,
+    dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate,
+    utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap,
+    PARENT_SIZE,
 };
 use std::convert::TryInto;
 
 use crate::dirstate::{extract_dirstate, make_dirstate_tuple};
-use std::time::Duration;
 
 fn parse_dirstate_wrapper(
     py: Python,
@@ -98,7 +98,7 @@
             p1: p1.try_into().unwrap(),
             p2: p2.try_into().unwrap(),
         },
-        Duration::from_secs(now.as_object().extract::<u64>(py)?),
+        Timestamp(now.as_object().extract::<u64>(py)?),
     ) {
         Ok(packed) => {
             for (filename, entry) in dirstate_map.iter() {
--- a/rust/hg-cpython/src/revlog.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hg-cpython/src/revlog.rs	Mon May 17 15:05:24 2021 +0200
@@ -172,6 +172,16 @@
         self.call_cindex(py, "clearcaches", args, kw)
     }
 
+    /// return the raw binary string representing a revision
+    def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "entry_binary", args, kw)
+    }
+
+    /// return a binary packed version of the header
+    def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "pack_header", args, kw)
+    }
+
     /// get an index entry
     def get(&self, *args, **kw) -> PyResult<PyObject> {
         self.call_cindex(py, "get", args, kw)
--- a/rust/hgcli/pyoxidizer.bzl	Fri May 07 10:39:58 2021 +0200
+++ b/rust/hgcli/pyoxidizer.bzl	Mon May 17 15:05:24 2021 +0200
@@ -1,5 +1,37 @@
+# The following variables can be passed in as parameters:
+#
+# VERSION
+#   Version string of program being produced.
+#
+# MSI_NAME
+#   Root name of MSI installer.
+#
+# EXTRA_MSI_FEATURES
+#   Semicolon-delimited string of extra features to advertise in the built MSI.
+#
+# SIGNING_PFX_PATH
+#   Path to code signing certificate to use.
+#
+# SIGNING_PFX_PASSWORD
+#   Password to code signing PFX file defined by SIGNING_PFX_PATH.
+#
+# SIGNING_SUBJECT_NAME
+#   String fragment in code signing certificate subject name used to find
+#   code signing certificate in Windows certificate store.
+#
+# TIME_STAMP_SERVER_URL
+#   URL of the time-stamp token authority (RFC 3161) server used to stamp code signatures.
+
 ROOT = CWD + "/../.."
 
+VERSION = VARS.get("VERSION", "5.8")
+MSI_NAME = VARS.get("MSI_NAME", "mercurial")
+EXTRA_MSI_FEATURES = VARS.get("EXTRA_MSI_FEATURES")
+SIGNING_PFX_PATH = VARS.get("SIGNING_PFX_PATH")
+SIGNING_PFX_PASSWORD = VARS.get("SIGNING_PFX_PASSWORD", "")
+SIGNING_SUBJECT_NAME = VARS.get("SIGNING_SUBJECT_NAME")
+TIME_STAMP_SERVER_URL = VARS.get("TIME_STAMP_SERVER_URL", "http://timestamp.digicert.com")
+
 IS_WINDOWS = "windows" in BUILD_TARGET_TRIPLE
 
 # Code to run in Python interpreter.
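The `VARS.get` defaults above can be overridden at build time. For example, assuming a PyOxidizer release that supports the `--var` flag for passing such parameters, a release build could run something like `pyoxidizer build msi --var VERSION 5.8.1 --var SIGNING_PFX_PATH cert.pfx` to produce a signed installer with a custom version string.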
@@ -8,10 +40,7 @@
 set_build_path(ROOT + "/build/pyoxidizer")
 
 def make_distribution():
-    return default_python_distribution()
-
-def make_distribution_windows():
-    return default_python_distribution(flavor = "standalone_dynamic")
+    return default_python_distribution(python_version = "3.9")
 
 def resource_callback(policy, resource):
     if not IS_WINDOWS:
@@ -50,7 +79,7 @@
     packaging_policy.register_resource_callback(resource_callback)
 
     config = dist.make_python_interpreter_config()
-    config.raw_allocator = "system"
+    config.allocator_backend = "default"
     config.run_command = RUN_CODE
 
     # We want to let the user load extensions from the file system
@@ -83,34 +112,162 @@
 
     return m
 
-def make_embedded_resources(exe):
-    return exe.to_embedded_resources()
+
+# This adjusts the InstallManifest produced from exe generation to provide
+# additional files found in a Windows install layout.
+def make_windows_install_layout(manifest):
+    # Copy various files to new install locations. This can go away once
+    # we're using the importlib resource reader.
+    RECURSIVE_COPIES = {
+        "lib/mercurial/locale/": "locale/",
+        "lib/mercurial/templates/": "templates/",
+    }
+    for (search, replace) in RECURSIVE_COPIES.items():
+        for path in manifest.paths():
+            if path.startswith(search):
+                new_path = path.replace(search, replace)
+                print("copy %s to %s" % (path, new_path))
+                file = manifest.get_file(path)
+                manifest.add_file(file, path = new_path)
+
+    # Similar to above, but with filename pattern matching.
+    # lib/mercurial/helptext/**/*.txt -> helptext/
+    # lib/mercurial/defaultrc/*.rc -> defaultrc/
+    for path in manifest.paths():
+        if path.startswith("lib/mercurial/helptext/") and path.endswith(".txt"):
+            new_path = path[len("lib/mercurial/"):]
+        elif path.startswith("lib/mercurial/defaultrc/") and path.endswith(".rc"):
+            new_path = path[len("lib/mercurial/"):]
+        else:
+            continue
+
+        print("copying %s to %s" % (path, new_path))
+        manifest.add_file(manifest.get_file(path), path = new_path)
 
-register_target("distribution_posix", make_distribution)
-register_target("distribution_windows", make_distribution_windows)
+    # We also install a handful of additional files.
+    EXTRA_CONTRIB_FILES = [
+        "bash_completion",
+        "hgweb.fcgi",
+        "hgweb.wsgi",
+        "logo-droplets.svg",
+        "mercurial.el",
+        "mq.el",
+        "tcsh_completion",
+        "tcsh_completion_build.sh",
+        "xml.rnc",
+        "zsh_completion",
+    ]
+
+    for f in EXTRA_CONTRIB_FILES:
+        manifest.add_file(FileContent(path = ROOT + "/contrib/" + f), directory = "contrib")
 
-register_target("exe_posix", make_exe, depends = ["distribution_posix"])
-register_target("exe_windows", make_exe, depends = ["distribution_windows"])
+    # Individual files with full source to destination path mapping.
+    EXTRA_FILES = {
+        "contrib/hgk": "contrib/hgk.tcl",
+        "contrib/win32/postinstall.txt": "ReleaseNotes.txt",
+        "contrib/win32/ReadMe.html": "ReadMe.html",
+        "doc/style.css": "doc/style.css",
+        "COPYING": "Copying.txt",
+    }
+
+    for source, dest in EXTRA_FILES.items():
+        print("adding extra file %s" % dest)
+        manifest.add_file(FileContent(path = ROOT + "/" + source), path = dest)
+
+    # And finally some wildcard matches.
+    manifest.add_manifest(glob(
+        include = [ROOT + "/contrib/vim/*"],
+        strip_prefix = ROOT + "/"
+    ))
+    manifest.add_manifest(glob(
+        include = [ROOT + "/doc/*.html"],
+        strip_prefix = ROOT + "/"
+    ))
+
+    # But we don't ship hg-ssh on Windows, so exclude its documentation.
+    manifest.remove("doc/hg-ssh.8.html")
+
+    return manifest
+
 
-register_target(
-    "app_posix",
-    make_manifest,
-    depends = ["distribution_posix", "exe_posix"],
-    default = "windows" not in BUILD_TARGET_TRIPLE,
-)
-register_target(
-    "app_windows",
-    make_manifest,
-    depends = ["distribution_windows", "exe_windows"],
-    default = "windows" in BUILD_TARGET_TRIPLE,
-)
+def make_msi(manifest):
+    manifest = make_windows_install_layout(manifest)
+
+    if "x86_64" in BUILD_TARGET_TRIPLE:
+        platform = "x64"
+    else:
+        platform = "x86"
+
+    manifest.add_file(
+        FileContent(path = ROOT + "/contrib/packaging/wix/COPYING.rtf"),
+        path = "COPYING.rtf",
+    )
+    manifest.remove("Copying.txt")
+    manifest.add_file(
+        FileContent(path = ROOT + "/contrib/win32/mercurial.ini"),
+        path = "defaultrc/mercurial.rc",
+    )
+    manifest.add_file(
+        FileContent(filename = "editor.rc", content = "[ui]\neditor = notepad\n"),
+        path = "defaultrc/editor.rc",
+    )
+
+    wix = WiXInstaller("hg", "%s-%s.msi" % (MSI_NAME, VERSION))
+
+    # Materialize files in the manifest to the install layout.
+    wix.add_install_files(manifest)
+
+    # From mercurial.wxs.
+    wix.install_files_root_directory_id = "INSTALLDIR"
+
+    # Pull in our custom .wxs files.
+    defines = {
+        "PyOxidizer": "1",
+        "Platform": platform,
+        "Version": VERSION,
+        "Comments": "Installs Mercurial version %s" % VERSION,
+        "PythonVersion": "3",
+        "MercurialHasLib": "1",
+    }
+
+    if EXTRA_MSI_FEATURES:
+        defines["MercurialExtraFeatures"] = EXTRA_MSI_FEATURES
+
+    wix.add_wxs_file(
+        ROOT + "/contrib/packaging/wix/mercurial.wxs",
+        preprocessor_parameters=defines,
+    )
+
+    # Our .wxs files reference other files. Pull those into the build environment.
+    for f in ("defines.wxi", "guids.wxi", "COPYING.rtf"):
+        wix.add_build_file(f, ROOT + "/contrib/packaging/wix/" + f)
+
+    wix.add_build_file("mercurial.ico", ROOT + "/contrib/win32/mercurial.ico")
+
+    return wix
+
+
+def register_code_signers():
+    if not IS_WINDOWS:
+        return
+
+    if SIGNING_PFX_PATH:
+        signer = code_signer_from_pfx_file(SIGNING_PFX_PATH, SIGNING_PFX_PASSWORD)
+    elif SIGNING_SUBJECT_NAME:
+        signer = code_signer_from_windows_store_subject(SIGNING_SUBJECT_NAME)
+    else:
+        signer = None
+
+    if signer:
+        signer.set_time_stamp_server(TIME_STAMP_SERVER_URL)
+        signer.activate()
+
+
+register_code_signers()
+
+register_target("distribution", make_distribution)
+register_target("exe", make_exe, depends = ["distribution"])
+register_target("app", make_manifest, depends = ["distribution", "exe"], default = True)
+register_target("msi", make_msi, depends = ["app"])
 
 resolve_targets()
-
-# END OF COMMON USER-ADJUSTED SETTINGS.
-#
-# Everything below this is typically managed by PyOxidizer and doesn't need
-# to be updated by people.
-
-PYOXIDIZER_VERSION = "0.9.0"
-PYOXIDIZER_COMMIT = "1fbc264cc004226cd76ee452e0a386ffca6ccfb1"
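
For readers unfamiliar with PyOxidizer's Starlark dialect, the prefix rewrite at the heart of make_windows_install_layout above reduces to the following plain-Python sketch. The dict-based manifest and the function name are illustrative only, not PyOxidizer API:

  # Sketch only: InstallManifest is not a dict, but the rewrite has this shape.
  RECURSIVE_COPIES = {
      "lib/mercurial/locale/": "locale/",
      "lib/mercurial/templates/": "templates/",
  }

  def rewrite_prefixes(manifest):
      # manifest: mapping of install path -> file content
      for search, replace in RECURSIVE_COPIES.items():
          for path in list(manifest):
              if path.startswith(search):
                  # keep the original entry and add a copy under the new prefix
                  manifest[path.replace(search, replace, 1)] = manifest[path]
      return manifest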
--- a/rust/rhg/src/commands/status.rs	Fri May 07 10:39:58 2021 +0200
+++ b/rust/rhg/src/commands/status.rs	Mon May 17 15:05:24 2021 +0200
@@ -181,7 +181,7 @@
         collect_traversed_dirs: false,
     };
     let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
-    let ((lookup, ds_status), pattern_warnings) = hg::status(
+    let (ds_status, pattern_warnings) = hg::status(
         &dmap,
         &AlwaysMatcher,
         repo.working_directory_path().to_owned(),
@@ -195,10 +195,10 @@
     if !ds_status.bad.is_empty() {
         warn!("Bad matches {:?}", &(ds_status.bad))
     }
-    if !lookup.is_empty() {
+    if !ds_status.unsure.is_empty() {
         info!(
             "Files to be rechecked by retrieval from filelog: {:?}",
-            &lookup
+            &ds_status.unsure
         );
     }
     // TODO check ordering to match `hg status` output.
@@ -206,7 +206,7 @@
     if display_states.modified {
         display_status_paths(ui, &(ds_status.modified), b"M")?;
     }
-    if !lookup.is_empty() {
+    if !ds_status.unsure.is_empty() {
         let p1: Node = parents
             .expect(
                 "Dirstate with no parents should not list any file to
@@ -217,7 +217,7 @@
         let p1_hex = format!("{:x}", p1);
         let mut rechecked_modified: Vec<HgPathCow> = Vec::new();
         let mut rechecked_clean: Vec<HgPathCow> = Vec::new();
-        for to_check in lookup {
+        for to_check in ds_status.unsure {
             if cat_file_is_modified(repo, &to_check, &p1_hex)? {
                 rechecked_modified.push(to_check);
             } else {
--- a/tests/drawdag.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/drawdag.py	Mon May 17 15:05:24 2021 +0200
@@ -86,7 +86,6 @@
 import itertools
 import re
 
-from mercurial.node import nullid
 from mercurial.i18n import _
 from mercurial import (
     context,
@@ -299,7 +298,7 @@
         self._added = added
         self._parents = parentctxs
         while len(self._parents) < 2:
-            self._parents.append(repo[nullid])
+            self._parents.append(repo[repo.nullid])
 
     def filectx(self, key):
         return simplefilectx(key, self._added[key])
@@ -388,7 +387,7 @@
         content = content.replace(br'\n', b'\n').replace(br'\1', b'\1')
         files[name][path] = content
 
-    committed = {None: nullid}  # {name: node}
+    committed = {None: repo.nullid}  # {name: node}
 
     # for leaf nodes, try to find existing nodes in repo
     for name, parents in edges.items():
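
A mechanical substitution recurs throughout the test changes below: the module-level nullid constant from mercurial.node gives way to a per-repository repo.nullid attribute, so the null identifier can follow the repository's hash configuration. A minimal before/after sketch, with ctx and repo as stand-in names:

  # Before: one global constant
  from mercurial.node import nullid
  is_root = ctx.p1().node() == nullid

  # After: the null id belongs to the repository object
  is_root = ctx.p1().node() == repo.nullid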
--- a/tests/run-tests.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/run-tests.py	Mon May 17 15:05:24 2021 +0200
@@ -3544,7 +3544,7 @@
             if os.getenv('MSYSTEM'):
                 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
                     f.write(b'#!/bin/sh\n')
-                    f.write(b'py -3 "$@"\n')
+                    f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
 
             exedir, exename = os.path.split(sysexecutable)
             vlog(
--- a/tests/simplestorerepo.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/simplestorerepo.py	Mon May 17 15:05:24 2021 +0200
@@ -18,7 +18,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from mercurial.thirdparty import attr
@@ -136,18 +135,18 @@
             self._indexbynode[entry[b'node']] = entry
             self._indexbyrev[i] = entry
 
-        self._indexbynode[nullid] = {
-            b'node': nullid,
-            b'p1': nullid,
-            b'p2': nullid,
+        self._indexbynode[self._repo.nullid] = {
+            b'node': self._repo.nullid,
+            b'p1': self._repo.nullid,
+            b'p2': self._repo.nullid,
             b'linkrev': nullrev,
             b'flags': 0,
         }
 
         self._indexbyrev[nullrev] = {
-            b'node': nullid,
-            b'p1': nullid,
-            b'p2': nullid,
+            b'node': self._repo.nullid,
+            b'p1': self._repo.nullid,
+            b'p2': self._repo.nullid,
             b'linkrev': nullrev,
             b'flags': 0,
         }
@@ -160,7 +159,7 @@
                 (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
             )
 
-        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
+        self._index.append((0, 0, 0, -1, -1, -1, -1, self._repo.nullid))
 
     def __len__(self):
         return len(self._indexdata)
@@ -288,7 +287,7 @@
             node = nodeorrev
         validatenode(node)
 
-        if node == nullid:
+        if node == self._repo.nullid:
             return b''
 
         rev = self.rev(node)
@@ -325,7 +324,7 @@
     def renamed(self, node):
         validatenode(node)
 
-        if self.parents(node)[0] != nullid:
+        if self.parents(node)[0] != self._repo.nullid:
             return False
 
         fulltext = self.revision(node)
@@ -451,7 +450,7 @@
         sidedata_helpers=None,
     ):
         # TODO this will probably break on some ordering options.
-        nodes = [n for n in nodes if n != nullid]
+        nodes = [n for n in nodes if n != self._repo.nullid]
         if not nodes:
             return
         for delta in storageutil.emitrevisions(
@@ -559,7 +558,7 @@
                 continue
 
             # Need to resolve the fulltext from the delta base.
-            if deltabase == nullid:
+            if deltabase == self._repo.nullid:
                 text = mdiff.patch(b'', delta)
             else:
                 text = mdiff.patch(self.revision(deltabase), delta)
@@ -588,11 +587,11 @@
         # This is copied from revlog.py.
         if start is None and stop is None:
             if not len(self):
-                return [nullid]
+                return [self._repo.nullid]
             return [self.node(r) for r in self._headrevs()]
 
         if start is None:
-            start = nullid
+            start = self._repo.nullid
         if stop is None:
             stop = []
         stoprevs = {self.rev(n) for n in stop}
--- a/tests/test-amend.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-amend.t	Mon May 17 15:05:24 2021 +0200
@@ -196,7 +196,8 @@
   $ hg update -q B
   $ echo 2 >> B
   $ hg amend
-  abort: cannot amend changeset with children
+  abort: cannot amend changeset, as that will orphan 1 descendants
+  (see 'hg help evolution.instability')
   [10]
 
 #if obsstore-on
@@ -231,6 +232,17 @@
   $ hg debugobsolete -r .
   112478962961147124edd43549aedd1a335e44bf be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
   be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 16084da537dd8f84cfdb3055c633772269d62e1b 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'note': 'adding bar', 'operation': 'amend', 'user': 'test'}
+
+Cannot cause divergence by default
+
+  $ hg co --hidden 1
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg amend -m divergent
+  abort: cannot amend 112478962961, as that creates content-divergence with 16084da537dd
+  (add --verbose for details)
+  [10]
+  $ hg amend -m divergent --config experimental.evolution.allowdivergence=true
+  2 new content-divergent changesets
 #endif
 
 Cannot amend public changeset
@@ -238,7 +250,7 @@
   $ hg phase -r A --public
   $ hg update -C -q A
   $ hg amend -m AMEND
-  abort: cannot amend public changesets
+  abort: cannot amend public changesets: 426bada5c675
   (see 'hg help phases' for details)
   [10]
 
--- a/tests/test-annotate.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-annotate.t	Mon May 17 15:05:24 2021 +0200
@@ -479,19 +479,19 @@
 
   $ cat > ../legacyrepo.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import commit, error, extensions, node
+  > from mercurial import commit, error, extensions
   > def _filecommit(orig, repo, fctx, manifest1, manifest2,
   >                 linkrev, tr, includecopymeta, ms):
   >     fname = fctx.path()
   >     text = fctx.data()
   >     flog = repo.file(fname)
-  >     fparent1 = manifest1.get(fname, node.nullid)
-  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     fparent1 = manifest1.get(fname, repo.nullid)
+  >     fparent2 = manifest2.get(fname, repo.nullid)
   >     meta = {}
   >     copy = fctx.copysource()
   >     if copy and copy != fname:
   >         raise error.Abort('copying is not supported')
-  >     if fparent2 != node.nullid:
+  >     if fparent2 != repo.nullid:
   >         return flog.add(text, meta, tr, linkrev,
   >                         fparent1, fparent2), 'modified'
   >     raise error.Abort('only merging is supported')
--- a/tests/test-blackbox.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-blackbox.t	Mon May 17 15:05:24 2021 +0200
@@ -221,7 +221,7 @@
   1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob)
   1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> exthook-update: echo hooked finished in * seconds (glob)
   1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> update exited 0 after * seconds (glob)
-  1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
+  1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --no-profile --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
   1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> blackbox -l 5
 
 log rotation
--- a/tests/test-branch-change.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-branch-change.t	Mon May 17 15:05:24 2021 +0200
@@ -57,7 +57,8 @@
 Change in middle of the stack (linear commits)
 
   $ hg branch -r 1::3 foo
-  abort: cannot change branch of changeset with children
+  abort: cannot change branch of changeset, as that will orphan 1 descendants
+  (see 'hg help evolution.instability')
   [10]
 
 Change with dirty working directory
@@ -128,7 +129,8 @@
 Changing on a branch head which is not topological head
 
   $ hg branch -r 2 stable
-  abort: cannot change branch of changeset with children
+  abort: cannot change branch of changeset, as that will orphan 2 descendants
+  (see 'hg help evolution.instability')
   [10]
 
 Enabling the allowunstable config and trying to change branch on a branch head
@@ -148,7 +150,8 @@
   [255]
 
   $ hg branch -r 4 --hidden foobar
-  abort: cannot change branch of a obsolete changeset
+  abort: cannot change branch of 3938acfb5c0f, as that creates content-divergence with 7c1991464886
+  (add --verbose for details)
   [10]
 
 Make sure bookmark movement is correct
@@ -366,7 +369,7 @@
 
   $ hg phase -r . -p
   $ hg branch -r . def
-  abort: cannot change branch of public changesets
+  abort: cannot change branch of public changesets: d1c2addda4a2
   (see 'hg help phases' for details)
   [10]
 
--- a/tests/test-bundle-r.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-bundle-r.t	Mon May 17 15:05:24 2021 +0200
@@ -224,7 +224,7 @@
   adding changesets
   transaction abort!
   rollback completed
-  abort: 00changelog.i@93ee6ab32777cd430e07da694794fb6a4f917712: unknown parent
+  abort: 00changelog@93ee6ab32777cd430e07da694794fb6a4f917712: unknown parent
   [50]
 
 revision 2
--- a/tests/test-bundle.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-bundle.t	Mon May 17 15:05:24 2021 +0200
@@ -751,7 +751,7 @@
 partial history bundle, fails w/ unknown parent
 
   $ hg -R bundle.hg verify
-  abort: 00changelog.i@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
+  abort: 00changelog@bbd179dfa0a71671c253b3ae0aa1513b60d199fa: unknown parent
   [50]
 
 full history bundle, refuses to verify non-local repo
--- a/tests/test-check-interfaces.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-check-interfaces.py	Mon May 17 15:05:24 2021 +0200
@@ -282,6 +282,7 @@
         revision=b'',
         sidedata=b'',
         delta=None,
+        protocol_flags=b'',
     )
     checkzobject(rd)
 
--- a/tests/test-chg.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-chg.t	Mon May 17 15:05:24 2021 +0200
@@ -458,6 +458,7 @@
   LC_CTYPE=
   $ (unset LC_ALL; unset LANG; LC_CTYPE=unsupported_value chg \
   >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+  *cannot change locale* (glob) (?)
   LC_CTYPE=unsupported_value
   $ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
   >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
@@ -467,3 +468,72 @@
   LC_ALL=
   LC_CTYPE=
   LANG=
+
+Profiling isn't permanently enabled or carried over between chg invocations that
+share the same server
+  $ cp $HGRCPATH.orig $HGRCPATH
+  $ hg init $TESTTMP/profiling
+  $ cd $TESTTMP/profiling
+  $ filteredchg() {
+  >   CHGDEBUG=1 chg "$@" 2>&1 | egrep 'Sample count|start cmdserver' || true
+  > }
+  $ newchg() {
+  >   chg --kill-chg-daemon
+  >   filteredchg "$@" | egrep -v 'start cmdserver' || true
+  > }
+(--profile isn't permanently on just because it was specified when chg was
+started)
+  $ newchg log -r . --profile
+  Sample count: * (glob)
+  $ filteredchg log -r .
+(enabling profiling via config works, even on the first chg command that starts
+a cmdserver)
+  $ cat >> $HGRCPATH <<EOF
+  > [profiling]
+  > type=stat
+  > enabled=1
+  > EOF
+  $ newchg log -r .
+  Sample count: * (glob)
+  $ filteredchg log -r .
+  Sample count: * (glob)
+(test that we aren't accumulating more and more samples each run)
+  $ cat > $TESTTMP/debugsleep.py <<EOF
+  > import time
+  > from mercurial import registrar
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > @command(b'debugsleep', [], b'', norepo=True)
+  > def debugsleep(ui):
+  >   start = time.time()
+  >   x = 0
+  >   while time.time() < start + 0.5:
+  >     time.sleep(.1)
+  >     x += 1
+  >   ui.status(b'%d debugsleep iterations in %.03fs\n' % (x, time.time() - start))
+  > EOF
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > debugsleep = $TESTTMP/debugsleep.py
+  > EOF
+  $ newchg debugsleep > run_1
+  $ filteredchg debugsleep > run_2
+  $ filteredchg debugsleep > run_3
+  $ filteredchg debugsleep > run_4
+FIXME: Run 4 should not be >3x Run 1's number of samples.
+  $ "$PYTHON" <<EOF
+  > r1 = int(open("run_1", "r").read().split()[-1])
+  > r4 = int(open("run_4", "r").read().split()[-1])
+  > print("Run 1: %d samples\nRun 4: %d samples\nRun 4 > 3 * Run 1: %s" %
+  >       (r1, r4, r4 > (r1 * 3)))
+  > EOF
+  Run 1: * samples (glob)
+  Run 4: * samples (glob)
+  Run 4 > 3 * Run 1: False
+(Disabling with --no-profile on the commandline still works, but isn't permanent)
+  $ newchg log -r . --no-profile
+  $ filteredchg log -r .
+  Sample count: * (glob)
+  $ filteredchg log -r . --no-profile
+  $ filteredchg log -r .
+  Sample count: * (glob)
--- a/tests/test-commit-amend.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-commit-amend.t	Mon May 17 15:05:24 2021 +0200
@@ -10,7 +10,7 @@
 
   $ hg phase -r . -p
   $ hg ci --amend
-  abort: cannot amend public changesets
+  abort: cannot amend public changesets: ad120869acf0
   (see 'hg help phases' for details)
   [10]
   $ hg phase -r . -f -d
@@ -406,7 +406,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg ci --amend
-  abort: cannot amend while merging
+  abort: cannot amend changesets while merging
   [20]
   $ hg ci -m 'merge'
 
@@ -957,6 +957,7 @@
   $ cat >> .hg/hgrc <<EOF
   > [committemplate]
   > changeset.commit.amend = {desc}\n
+  >     HG: {revset('parents()') % 'parent: {desc|firstline}\n'}
   >     HG: M: {file_mods}
   >     HG: A: {file_adds}
   >     HG: R: {file_dels}
@@ -971,6 +972,8 @@
   $ HGEDITOR=cat hg commit --amend -e -m "expecting diff of foo"
   expecting diff of foo
   
+  HG: parent: editor should be suppressed
+  
   HG: M: 
   HG: A: foo
   HG: R: 
@@ -985,6 +988,8 @@
   $ HGEDITOR=cat hg commit --amend -e -m "expecting diff of foo and y"
   expecting diff of foo and y
   
+  HG: parent: expecting diff of foo
+  
   HG: M: 
   HG: A: foo y
   HG: R: 
@@ -1003,6 +1008,8 @@
   $ HGEDITOR=cat hg commit --amend -e -m "expecting diff of a, foo and y"
   expecting diff of a, foo and y
   
+  HG: parent: expecting diff of foo and y
+  
   HG: M: 
   HG: A: foo y
   HG: R: a
@@ -1027,6 +1034,8 @@
   $ HGEDITOR=cat hg commit --amend -e -m "expecting diff of a, foo, x and y"
   expecting diff of a, foo, x and y
   
+  HG: parent: expecting diff of a, foo and y
+  
   HG: M: 
   HG: A: foo y
   HG: R: a x
@@ -1058,6 +1067,8 @@
   $ HGEDITOR=cat hg commit --amend -e -m "cc should be excluded" -X cc
   cc should be excluded
   
+  HG: parent: expecting diff of a, foo, x and y
+  
   HG: M: 
   HG: A: foo y
   HG: R: a x
--- a/tests/test-commit.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-commit.t	Mon May 17 15:05:24 2021 +0200
@@ -646,14 +646,14 @@
 verify pathauditor blocks evil filepaths
   $ cat > evil-commit.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import context, hg, node, ui as uimod
+  > from mercurial import context, hg, ui as uimod
   > notrc = u".h\u200cg".encode('utf-8') + b'/hgrc'
   > u = uimod.ui.load()
   > r = hg.repository(u, b'.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, memctx, path,
   >         b'[hooks]\nupdate = echo owned')
-  > c = context.memctx(r, [r.changelog.tip(), node.nullid],
+  > c = context.memctx(r, [r.changelog.tip(), r.nullid],
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
@@ -672,14 +672,14 @@
   repository tip rolled back to revision 2 (undo commit)
   $ cat > evil-commit.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import context, hg, node, ui as uimod
+  > from mercurial import context, hg, ui as uimod
   > notrc = b"HG~1/hgrc"
   > u = uimod.ui.load()
   > r = hg.repository(u, b'.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, memctx, path,
   >         b'[hooks]\nupdate = echo owned')
-  > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+  > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
@@ -692,14 +692,14 @@
   repository tip rolled back to revision 2 (undo commit)
   $ cat > evil-commit.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import context, hg, node, ui as uimod
+  > from mercurial import context, hg, ui as uimod
   > notrc = b"HG8B6C~2/hgrc"
   > u = uimod.ui.load()
   > r = hg.repository(u, b'.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, memctx, path,
   >         b'[hooks]\nupdate = echo owned')
-  > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+  > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
--- a/tests/test-completion.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-completion.t	Mon May 17 15:05:24 2021 +0200
@@ -262,7 +262,7 @@
   cat: output, rev, decode, include, exclude, template
   clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
   commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
-  config: untrusted, edit, local, shared, non-shared, global, template
+  config: untrusted, edit, local, source, shared, non-shared, global, template
   continue: dry-run
   copy: forget, after, at-rev, force, include, exclude, dry-run
   debugancestor: 
--- a/tests/test-config.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-config.t	Mon May 17 15:05:24 2021 +0200
@@ -277,8 +277,7 @@
   > emptysource = `pwd`/emptysource.py
   > EOF
 
-  $ hg config --debug empty.source
-  read config from: * (glob)
+  $ hg config --source empty.source
   none: value
   $ hg config empty.source -Tjson
   [
@@ -349,16 +348,16 @@
 
 config affected by environment variables
 
-  $ EDITOR=e1 VISUAL=e2 hg config --debug | grep 'ui\.editor'
+  $ EDITOR=e1 VISUAL=e2 hg config --source | grep 'ui\.editor'
   $VISUAL: ui.editor=e2
 
-  $ VISUAL=e2 hg config --debug --config ui.editor=e3 | grep 'ui\.editor'
+  $ VISUAL=e2 hg config --source --config ui.editor=e3 | grep 'ui\.editor'
   --config: ui.editor=e3
 
-  $ PAGER=p1 hg config --debug | grep 'pager\.pager'
+  $ PAGER=p1 hg config --source | grep 'pager\.pager'
   $PAGER: pager.pager=p1
 
-  $ PAGER=p1 hg config --debug --config pager.pager=p2 | grep 'pager\.pager'
+  $ PAGER=p1 hg config --source --config pager.pager=p2 | grep 'pager\.pager'
   --config: pager.pager=p2
 
 verify that aliases are evaluated as well
--- a/tests/test-contrib-perf.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-contrib-perf.t	Mon May 17 15:05:24 2021 +0200
@@ -411,10 +411,10 @@
    >     from mercurial import (
    import newer module separately in try clause for early Mercurial
   contrib/perf.py:\d+: (re)
-   >     origindexpath = orig.opener.join(orig.indexfile)
+   >     origindexpath = orig.opener.join(indexfile)
    use getvfs()/getsvfs() for early Mercurial
   contrib/perf.py:\d+: (re)
-   >     origdatapath = orig.opener.join(orig.datafile)
+   >     origdatapath = orig.opener.join(datafile)
    use getvfs()/getsvfs() for early Mercurial
   contrib/perf.py:\d+: (re)
    >         vfs = vfsmod.vfs(tmpdir)
--- a/tests/test-convert-filemap.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-convert-filemap.t	Mon May 17 15:05:24 2021 +0200
@@ -292,12 +292,12 @@
   $ rm -rf source/.hg/store/data/dir/file4
 #endif
   $ hg -q convert --filemap renames.fmap --datesort source dummydest
-  abort: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+  abort: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
   abort: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   [50]
   $ hg -q convert --filemap renames.fmap --datesort --config convert.hg.ignoreerrors=1 source renames.repo
-  ignoring: data/dir/file3.i@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
-  ignoring: data/dir/file4.i@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
+  ignoring: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+  ignoring: data/dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
   ignoring: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   ignoring: data/dir/file4/index@6edd55f559cd: no node (reposimplestore !)
   $ hg up -q -R renames.repo
--- a/tests/test-convert-hg-source.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-convert-hg-source.t	Mon May 17 15:05:24 2021 +0200
@@ -182,7 +182,7 @@
   sorting...
   converting...
   4 init
-  ignoring: data/b.i@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
+  ignoring: data/b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
   ignoring: data/b/index@1e88685f5dde: no node (reposimplestore !)
   3 changeall
   2 changebagain
--- a/tests/test-copies-chain-merge.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-copies-chain-merge.t	Mon May 17 15:05:24 2021 +0200
@@ -1,4 +1,4 @@
-#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel
+#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel pull push pull-upgrade push-upgrade
 
 =====================================================
 Test Copy tracing for chain of copies involving merge
@@ -51,11 +51,41 @@
 #if sidedata
   $ cat >> $HGRCPATH << EOF
   > [format]
-  > exp-use-side-data = yes
+  > exp-use-copies-side-data-changeset = yes
+  > EOF
+#endif
+
+#if pull
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = yes
+  > EOF
+#endif
+
+#if push
+  $ cat >> $HGRCPATH << EOF
+  > [format]
   > exp-use-copies-side-data-changeset = yes
   > EOF
 #endif
 
+#if pull-upgrade
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = no
+  > [experimental]
+  > changegroup4 = yes
+  > EOF
+#endif
+
+#if push-upgrade
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = no
+  > [experimental]
+  > changegroup4 = yes
+  > EOF
+#endif
 
   $ cat > same-content.txt << EOF
   > Here is some content that will be the same accros multiple file.
@@ -1617,7 +1647,6 @@
 #if upgraded
   $ cat >> $HGRCPATH << EOF
   > [format]
-  > exp-use-side-data = yes
   > exp-use-copies-side-data-changeset = yes
   > EOF
   $ hg debugformat -v
@@ -1640,7 +1669,7 @@
   requirements
      preserved: * (glob)
      removed: revlogv1
-     added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
+     added: exp-copies-sidedata-changeset, exp-revlogv2.2
   
   processed revlogs:
     - all-filelogs
@@ -1652,7 +1681,6 @@
 #if upgraded-parallel
   $ cat >> $HGRCPATH << EOF
   > [format]
-  > exp-use-side-data = yes
   > exp-use-copies-side-data-changeset = yes
   > [experimental]
   > worker.repository-upgrade=yes
@@ -1680,7 +1708,7 @@
   requirements
      preserved: * (glob)
      removed: revlogv1
-     added: exp-copies-sidedata-changeset, exp-revlogv2.2, exp-sidedata-flag
+     added: exp-copies-sidedata-changeset, exp-revlogv2.2
   
   processed revlogs:
     - all-filelogs
@@ -1689,6 +1717,79 @@
   
 #endif
 
+#if pull
+  $ cd ..
+  $ mv repo-chain repo-source
+  $ hg init repo-chain
+  $ cd repo-chain
+  $ hg pull ../repo-source
+  pulling from ../repo-source
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 80 changesets with 44 changes to 25 files (+39 heads)
+  new changesets a3a31bbefea6:908ce9259ffa
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+#endif
+
+#if pull-upgrade
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = yes
+  > [experimental]
+  > changegroup4 = yes
+  > EOF
+  $ cd ..
+  $ mv repo-chain repo-source
+  $ hg init repo-chain
+  $ cd repo-chain
+  $ hg pull ../repo-source
+  pulling from ../repo-source
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 80 changesets with 44 changes to 25 files (+39 heads)
+  new changesets a3a31bbefea6:908ce9259ffa
+  (run 'hg heads' to see heads, 'hg merge' to merge)
+#endif
+
+#if push
+  $ cd ..
+  $ mv repo-chain repo-source
+  $ hg init repo-chain
+  $ cd repo-source
+  $ hg push ../repo-chain
+  pushing to ../repo-chain
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 80 changesets with 44 changes to 25 files (+39 heads)
+  $ cd ../repo-chain
+#endif
+
+#if push-upgrade
+  $ cat >> $HGRCPATH << EOF
+  > [format]
+  > exp-use-copies-side-data-changeset = yes
+  > [experimental]
+  > changegroup4 = yes
+  > EOF
+  $ cd ..
+  $ mv repo-chain repo-source
+  $ hg init repo-chain
+  $ cd repo-source
+  $ hg push ../repo-chain
+  pushing to ../repo-chain
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 80 changesets with 44 changes to 25 files (+39 heads)
+  $ cd ../repo-chain
+#endif
 
 #if no-compatibility no-filelog no-changeset
 
@@ -3405,12 +3506,7 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBF-change-m-0")'
   M b
   A d
-    h (filelog !)
-    h (sidedata !)
-    h (upgraded !)
-    h (upgraded-parallel !)
-    h (changeset !)
-    h (compatibility !)
+    h
   A t
     p
   R a
@@ -3564,24 +3660,15 @@
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm")' f
   A f
-    a (filelog !)
-    a (sidedata !)
-    a (upgraded !)
-    a (upgraded-parallel !)
+    a (no-changeset no-compatibility !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE,Km")' f
   A f
-    a (filelog !)
-    a (sidedata !)
-    a (upgraded !)
-    a (upgraded-parallel !)
+    a (no-changeset no-compatibility !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AEm")' f
   A f
-    a (filelog !)
-    a (sidedata !)
-    a (upgraded !)
-    a (upgraded-parallel !)
+    a (no-changeset no-compatibility !)
 
 
 The result from mEAm is the same for the subsequent merge:
@@ -3589,23 +3676,17 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm")' f
   A f
     a (filelog !)
-    b (sidedata !)
-    b (upgraded !)
-    b (upgraded-parallel !)
+    b (no-changeset no-compatibility no-filelog !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA,Jm")' f
   A f
     a (filelog !)
-    b (sidedata !)
-    b (upgraded !)
-    b (upgraded-parallel !)
+    b (no-changeset no-compatibility no-filelog !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EAm")' f
   A f
     a (filelog !)
-    b (sidedata !)
-    b (upgraded !)
-    b (upgraded-parallel !)
+    b (no-changeset no-compatibility no-filelog !)
 
 Subcase: chaining conflicting rename resolution
 ```````````````````````````````````````````````
@@ -3620,24 +3701,17 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQm")' v
   A v
     r (filelog !)
-    p (sidedata !)
-    p (upgraded !)
-    p (upgraded-parallel !)
+    p (no-changeset no-compatibility no-filelog !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQ,Tm")' v
   A v
     r (filelog !)
-    p (sidedata !)
-    p (upgraded !)
-    p (upgraded-parallel !)
+    p (no-changeset no-compatibility no-filelog !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mT,PQm")' v
   A v
     r (filelog !)
-    p (sidedata !)
-    p (upgraded !)
-    p (upgraded-parallel !)
-
+    p (no-changeset no-compatibility no-filelog !)
 
 The result from mQPm is the same for the subsequent merge:
 
@@ -3652,9 +3726,7 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mS,QPm")' v
   A v
     r (filelog !)
-    r (sidedata !)
-    r (upgraded !)
-    r (upgraded-parallel !)
+    r (no-changeset no-compatibility no-filelog !)
 
 
 Subcase: chaining salvage information during a merge
@@ -3733,30 +3805,22 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm")' d
   A d
     a (filelog !)
-    h (sidedata !)
-    h (upgraded !)
-    h (upgraded-parallel !)
+    h (no-changeset no-compatibility no-filelog !)
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm")' d
   A d
     a (filelog !)
-    a (sidedata !)
-    a (upgraded !)
-    a (upgraded-parallel !)
+    a (no-changeset no-compatibility no-filelog !)
 
 Chained output
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mO,FGm")' d
   A d
     a (filelog !)
-    h (sidedata !)
-    h (upgraded !)
-    h (upgraded-parallel !)
+    h (no-changeset no-compatibility no-filelog !)
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFG,Om")' d
   A d
     a (filelog !)
-    h (sidedata !)
-    h (upgraded !)
-    h (upgraded-parallel !)
+    h (no-changeset no-compatibility no-filelog !)
 
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGF,Nm")' d
@@ -3779,17 +3843,11 @@
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change-m")' f
   A f
-    a (filelog !)
-    a (sidedata !)
-    a (upgraded !)
-    a (upgraded-parallel !)
+    a (no-changeset no-compatibility !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change,Km")' f
   A f
-    a (filelog !)
-    a (sidedata !)
-    a (upgraded !)
-    a (upgraded-parallel !)
+    a (no-changeset no-compatibility !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AE-change-m")' f
   A f
@@ -3801,20 +3859,14 @@
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change-m")' f
   A f
     a (filelog !)
-    b (sidedata !)
-    b (upgraded !)
-    b (upgraded-parallel !)
+    b (no-changeset no-compatibility no-filelog !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change,Jm")' f
   A f
     a (filelog !)
-    b (sidedata !)
-    b (upgraded !)
-    b (upgraded-parallel !)
+    b (no-changeset no-compatibility no-filelog !)
 
   $ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EA-change-m")' f
   A f
     a (filelog !)
-    b (sidedata !)
-    b (upgraded !)
-    b (upgraded-parallel !)
+    b (no-changeset no-compatibility no-filelog !)
--- a/tests/test-copies-in-changeset.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-copies-in-changeset.t	Mon May 17 15:05:24 2021 +0200
@@ -419,7 +419,7 @@
 Test upgrading/downgrading to sidedata storage
 ==============================================
 
-downgrading (keeping some sidedata)
+downgrading
 
   $ hg debugformat -v
   format-variant     repo config default
@@ -445,8 +445,9 @@
   $ hg debugsidedata -m -- 0
   $ cat << EOF > .hg/hgrc
   > [format]
-  > exp-use-side-data = yes
   > exp-use-copies-side-data-changeset = no
+  > [experimental]
+  > revlogv2 = enable-unstable-format-and-corrupt-my-data
   > EOF
   $ hg debugupgraderepo --run --quiet --no-backup > /dev/null
   $ hg debugformat -v
@@ -465,11 +466,7 @@
   compression:        zstd   zstd    zstd (zstd !)
   compression-level:  default default default
   $ hg debugsidedata -c -- 0
-  1 sidedata entries
-   entry-0014 size 14
   $ hg debugsidedata -c -- 1
-  1 sidedata entries
-   entry-0014 size 14
   $ hg debugsidedata -m -- 0
 
 upgrading
--- a/tests/test-copy.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-copy.t	Mon May 17 15:05:24 2021 +0200
@@ -115,6 +115,7 @@
   $ hg mv foo bar
   foo: not copying - file is not managed
   abort: no files to copy
+  (maybe you meant to use --after --at-rev=.)
   [10]
   $ hg st -A
   ? foo
@@ -124,14 +125,17 @@
   $ hg mv ../foo ../bar
   ../foo: not copying - file is not managed
   abort: no files to copy
+  (maybe you meant to use --after --at-rev=.)
   [10]
   $ hg mv ../foo ../bar --config ui.relative-paths=yes
   ../foo: not copying - file is not managed
   abort: no files to copy
+  (maybe you meant to use --after --at-rev=.)
   [10]
   $ hg mv ../foo ../bar --config ui.relative-paths=no
   foo: not copying - file is not managed
   abort: no files to copy
+  (maybe you meant to use --after --at-rev=.)
   [10]
   $ cd ..
   $ rmdir dir
--- a/tests/test-dirstate-race.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-dirstate-race.t	Mon May 17 15:05:24 2021 +0200
@@ -1,3 +1,11 @@
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
   $ hg init repo
   $ cd repo
   $ echo a > a
--- a/tests/test-dirstate-race2.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-dirstate-race2.t	Mon May 17 15:05:24 2021 +0200
@@ -1,3 +1,11 @@
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
 Checking the size/permissions/file-type of files stored in the
 dirstate after an update where the files are changed concurrently
 outside of hg's control.
--- a/tests/test-dirstate.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-dirstate.t	Mon May 17 15:05:24 2021 +0200
@@ -1,3 +1,11 @@
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
 ------ Test dirstate._dirs refcounting
 
   $ hg init t
--- a/tests/test-doctest.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-doctest.py	Mon May 17 15:05:24 2021 +0200
@@ -131,7 +131,6 @@
         ('mercurial.changelog', '{}'),
         ('mercurial.cmdutil', '{}'),
         ('mercurial.color', '{}'),
-        ('mercurial.config', '{}'),
         ('mercurial.dagparser', "{'optionflags': 4}"),
         ('mercurial.encoding', '{}'),
         ('mercurial.fancyopts', '{}'),
--- a/tests/test-fastannotate-hg.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-fastannotate-hg.t	Mon May 17 15:05:24 2021 +0200
@@ -482,19 +482,19 @@
 
   $ cat > ../legacyrepo.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import commit, error, extensions, node
+  > from mercurial import commit, error, extensions
   > def _filecommit(orig, repo, fctx, manifest1, manifest2,
   >                 linkrev, tr, includecopymeta, ms):
   >     fname = fctx.path()
   >     text = fctx.data()
   >     flog = repo.file(fname)
-  >     fparent1 = manifest1.get(fname, node.nullid)
-  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     fparent1 = manifest1.get(fname, repo.nullid)
+  >     fparent2 = manifest2.get(fname, repo.nullid)
   >     meta = {}
   >     copy = fctx.copysource()
   >     if copy and copy != fname:
   >         raise error.Abort('copying is not supported')
-  >     if fparent2 != node.nullid:
+  >     if fparent2 != repo.nullid:
   >         return flog.add(text, meta, tr, linkrev,
   >                         fparent1, fparent2), 'modified'
   >     raise error.Abort('only merging is supported')
--- a/tests/test-filelog.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-filelog.py	Mon May 17 15:05:24 2021 +0200
@@ -4,10 +4,7 @@
 """
 from __future__ import absolute_import, print_function
 
-from mercurial.node import (
-    hex,
-    nullid,
-)
+from mercurial.node import hex
 from mercurial import (
     hg,
     ui as uimod,
@@ -22,7 +19,7 @@
 def addrev(text, renamed=False):
     if renamed:
         # data doesn't matter. Just make sure filelog.renamed() returns True
-        meta = {b'copyrev': hex(nullid), b'copy': b'bar'}
+        meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
     else:
         meta = {}
 
@@ -30,7 +27,7 @@
     try:
         lock = repo.lock()
         t = repo.transaction(b'commit')
-        node = fl.add(text, meta, t, 0, nullid, nullid)
+        node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
         return node
     finally:
         if t:
--- a/tests/test-fix.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-fix.t	Mon May 17 15:05:24 2021 +0200
@@ -266,11 +266,11 @@
   $ hg commit -Aqm "hello"
   $ hg phase -r 0 --public
   $ hg fix -r 0
-  abort: cannot fix public changesets
+  abort: cannot fix public changesets: 6470986d2e7b
   (see 'hg help phases' for details)
   [10]
   $ hg fix -r 0 --working-dir
-  abort: cannot fix public changesets
+  abort: cannot fix public changesets: 6470986d2e7b
   (see 'hg help phases' for details)
   [10]
   $ hg cat -r tip hello.whole
@@ -1174,7 +1174,8 @@
   $ printf "two\n" > foo.whole
   $ hg commit -m "second"
   $ hg --config experimental.evolution.allowunstable=False fix -r '.^'
-  abort: cannot fix changeset with children
+  abort: cannot fix changeset, as that will orphan 1 descendants
+  (see 'hg help evolution.instability')
   [10]
   $ hg fix -r '.^'
   1 new orphan changesets
--- a/tests/test-globalopts.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-globalopts.t	Mon May 17 15:05:24 2021 +0200
@@ -419,6 +419,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -552,6 +553,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
--- a/tests/test-help-hide.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-help-hide.t	Mon May 17 15:05:24 2021 +0200
@@ -117,6 +117,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -254,6 +255,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
--- a/tests/test-help.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-help.t	Mon May 17 15:05:24 2021 +0200
@@ -169,6 +169,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -298,6 +299,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -1134,12 +1136,13 @@
       the changelog data, root/flat manifest data, treemanifest data, and
       filelogs.
   
-      There are 3 versions of changegroups: "1", "2", and "3". From a high-
+      There are 4 versions of changegroups: "1", "2", "3" and "4". From a high-
       level, versions "1" and "2" are almost exactly the same, with the only
       difference being an additional item in the *delta header*. Version "3"
       adds support for storage flags in the *delta header* and optionally
       exchanging treemanifests (enabled by setting an option on the
-      "changegroup" part in the bundle2).
+      "changegroup" part in the bundle2). Version "4" adds support for
+      exchanging sidedata (additional revision metadata not part of the digest).
   
       Changegroups when not exchanging treemanifests consist of 3 logical
       segments:
@@ -1206,8 +1209,8 @@
       existing entry (either that the recipient already has, or previously
       specified in the bundle/changegroup).
   
-      The *delta header* is different between versions "1", "2", and "3" of the
-      changegroup format.
+      The *delta header* is different between versions "1", "2", "3" and "4" of
+      the changegroup format.
   
       Version 1 (headerlen=80):
   
@@ -1236,6 +1239,15 @@
         |            |             |             |            |            |           |
         +------------------------------------------------------------------------------+
   
+      Version 4 (headerlen=103):
+  
+        +------------------------------------------------------------------------------+----------+
+        |            |             |             |            |            |           |          |
+        |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |  pflags  |
+        | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | (1 byte) |
+        |            |             |             |            |            |           |          |
+        +------------------------------------------------------------------------------+----------+
+  
       The *delta data* consists of "chunklen - 4 - headerlen" bytes, which
       contain a series of *delta*s, densely packed (no separators). These deltas
       describe a diff from an existing entry (either that the recipient already
@@ -1276,11 +1288,24 @@
          delimited metadata defining an object stored elsewhere. Used by the LFS
          extension.
   
+      4096
+         Contains copy information. This revision changes files in a way that
+         could affect copy tracing. This does *not* affect changegroup handling,
+         but is relevant for other parts of Mercurial.
+  
       For historical reasons, the integer values are identical to revlog version
       1 per-revision storage flags and correspond to bits being set in this
       2-byte field. Bits were allocated starting from the most-significant bit,
       hence the reverse ordering and allocation of these flags.
   
+      The *pflags* (protocol flags) field holds bitwise flags affecting the
+      protocol itself. They are first in the header since they may affect the
+      handling of the rest of the fields in a future version. They are defined
+      as such:
+  
+      1 indicates whether to read a chunk of sidedata (of variable length) right
+        after the revision flags.
+  
       Changeset Segment
       =================
   
@@ -1301,14 +1326,14 @@
       Treemanifests Segment
       ---------------------
   
-      The *treemanifests segment* only exists in changegroup version "3", and
-      only if the 'treemanifest' param is part of the bundle2 changegroup part
-      (it is not possible to use changegroup version 3 outside of bundle2).
-      Aside from the filenames in the *treemanifests segment* containing a
-      trailing "/" character, it behaves identically to the *filelogs segment*
-      (see below). The final sub-segment is followed by an *empty chunk*
-      (logically, a sub-segment with filename size 0). This denotes the boundary
-      to the *filelogs segment*.
+      The *treemanifests segment* only exists in changegroup version "3" and
+      "4", and only if the 'treemanifest' param is part of the bundle2
+      changegroup part (it is not possible to use changegroup version 3 or 4
+      outside of bundle2). Aside from the filenames in the *treemanifests
+      segment* containing a trailing "/" character, it behaves identically to
+      the *filelogs segment* (see below). The final sub-segment is followed by
+      an *empty chunk* (logically, a sub-segment with filename size 0). This
+      denotes the boundary to the *filelogs segment*.
   
       Filelogs Segment
       ================
@@ -2274,6 +2299,13 @@
   Environment Variables
   </td></tr>
   <tr><td>
+  <a href="/help/evolution">
+  evolution
+  </a>
+  </td><td>
+  Safely rewriting history (EXPERIMENTAL)
+  </td></tr>
+  <tr><td>
   <a href="/help/extensions">
   extensions
   </a>
@@ -3639,12 +3671,13 @@
   filelogs.
   </p>
   <p>
-  There are 3 versions of changegroups: &quot;1&quot;, &quot;2&quot;, and &quot;3&quot;. From a
+  There are 4 versions of changegroups: &quot;1&quot;, &quot;2&quot;, &quot;3&quot; and &quot;4&quot;. From a
   high-level, versions &quot;1&quot; and &quot;2&quot; are almost exactly the same, with the
   only difference being an additional item in the *delta header*. Version
   &quot;3&quot; adds support for storage flags in the *delta header* and optionally
   exchanging treemanifests (enabled by setting an option on the
-  &quot;changegroup&quot; part in the bundle2).
+  &quot;changegroup&quot; part in the bundle2). Version &quot;4&quot; adds support for exchanging
+  sidedata (additional revision metadata not part of the digest).
   </p>
   <p>
   Changegroups when not exchanging treemanifests consist of 3 logical
@@ -3724,8 +3757,8 @@
   bundle/changegroup).
   </p>
   <p>
-  The *delta header* is different between versions &quot;1&quot;, &quot;2&quot;, and
-  &quot;3&quot; of the changegroup format.
+  The *delta header* is different between versions &quot;1&quot;, &quot;2&quot;, &quot;3&quot; and &quot;4&quot;
+  of the changegroup format.
   </p>
   <p>
   Version 1 (headerlen=80):
@@ -3761,6 +3794,17 @@
   +------------------------------------------------------------------------------+
   </pre>
   <p>
+  Version 4 (headerlen=103):
+  </p>
+  <pre>
+  +------------------------------------------------------------------------------+----------+
+  |            |             |             |            |            |           |          |
+  |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |  pflags  |
+  | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | (1 byte) |
+  |            |             |             |            |            |           |          |
+  +------------------------------------------------------------------------------+----------+
+  </pre>
+  <p>
   The *delta data* consists of &quot;chunklen - 4 - headerlen&quot; bytes, which contain a
   series of *delta*s, densely packed (no separators). These deltas describe a diff
   from an existing entry (either that the recipient already has, or previously
@@ -3799,6 +3843,8 @@
    <dd>Ellipsis revision. Revision hash does not match data (likely due to rewritten parents).
    <dt>8192
    <dd>Externally stored. The revision fulltext contains &quot;key:value&quot; &quot;\n&quot; delimited metadata defining an object stored elsewhere. Used by the LFS extension.
+   <dt>4096
+   <dd>Contains copy information. This revision changes files in a way that could affect copy tracing. This does *not* affect changegroup handling, but is relevant for other parts of Mercurial.
   </dl>
   <p>
   For historical reasons, the integer values are identical to revlog version 1
@@ -3806,6 +3852,15 @@
   field. Bits were allocated starting from the most-significant bit, hence the
   reverse ordering and allocation of these flags.
   </p>
+  <p>
+  The *pflags* (protocol flags) field holds bitwise flags affecting the protocol
+  itself. They are first in the header since they may affect the handling of the
+  rest of the fields in a future version. They are defined as such:
+  </p>
+  <dl>
+   <dt>1 indicates whether to read a chunk of sidedata (of variable length) right
+   <dd>after the revision flags.
+  </dl>
   <h2>Changeset Segment</h2>
   <p>
   The *changeset segment* consists of a single *delta group* holding
@@ -3823,9 +3878,9 @@
   </p>
   <h3>Treemanifests Segment</h3>
   <p>
-  The *treemanifests segment* only exists in changegroup version &quot;3&quot;, and
-  only if the 'treemanifest' param is part of the bundle2 changegroup part
-  (it is not possible to use changegroup version 3 outside of bundle2).
+  The *treemanifests segment* only exists in changegroup version &quot;3&quot; and &quot;4&quot;,
+  and only if the 'treemanifest' param is part of the bundle2 changegroup part
+  (it is not possible to use changegroup version 3 or 4 outside of bundle2).
   Aside from the filenames in the *treemanifests segment* containing a
   trailing &quot;/&quot; character, it behaves identically to the *filelogs segment*
   (see below). The final sub-segment is followed by an *empty chunk* (logically,
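
The version-4 delta header documented above is easy to misread: the prose places pflags first while the diagram appends it last. A minimal unpacking sketch that follows the prose, with the layout and all names assumed solely from the quoted help text:

  import struct

  # Assumed layout: 1-byte pflags, five 20-byte nodes, 2-byte flags = 103 bytes.
  CG4_DELTA_HEADER = struct.Struct(">B20s20s20s20s20sH")

  def parse_cg4_delta_header(data):
      pflags, node, p1, p2, base, link, flags = CG4_DELTA_HEADER.unpack(
          data[:CG4_DELTA_HEADER.size]
      )
      # pflags bit 1 means a variable-length sidedata chunk follows the flags.
      return {
          "pflags": pflags,
          "node": node, "p1": p1, "p2": p2,
          "base": base, "link": link, "flags": flags,
      }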
--- a/tests/test-hgignore.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-hgignore.t	Mon May 17 15:05:24 2021 +0200
@@ -1,3 +1,11 @@
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
   $ hg init ignorerepo
   $ cd ignorerepo
 
--- a/tests/test-hgrc.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-hgrc.t	Mon May 17 15:05:24 2021 +0200
@@ -253,9 +253,8 @@
   > [paths]
   > foo = bar
   > EOF
-  $ hg showconfig --debug paths
+  $ hg showconfig --source paths
   plain: True
-  read config from: $TESTTMP/hgrc
   $TESTTMP/hgrc:17: paths.foo=$TESTTMP/bar
 
 Test we can skip the user configuration
--- a/tests/test-hgweb-json.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-hgweb-json.t	Mon May 17 15:05:24 2021 +0200
@@ -2272,6 +2272,10 @@
         "topic": "environment"
       },
       {
+        "summary": "Safely rewriting history (EXPERIMENTAL)",
+        "topic": "evolution"
+      },
+      {
         "summary": "Using Additional Features",
         "topic": "extensions"
       },
--- a/tests/test-histedit-obsolete.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-histedit-obsolete.t	Mon May 17 15:05:24 2021 +0200
@@ -307,7 +307,7 @@
   o  0:cb9a9f314b8b (public) a
   
   $ hg histedit -r '.~2'
-  abort: cannot edit public changesets
+  abort: cannot edit public changesets: cb9a9f314b8b, 40db8afa467b
   (see 'hg help phases' for details)
   [10]
 
--- a/tests/test-lfs-bundle.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-lfs-bundle.t	Mon May 17 15:05:24 2021 +0200
@@ -101,7 +101,7 @@
 #if windows
   $ unset LOCALAPPDATA
   $ unset APPDATA
-  $ HGRCPATH= hg config lfs --debug
+  $ HGRCPATH= hg config lfs --source
   abort: unknown lfs usercache location
   (define LOCALAPPDATA or APPDATA in the environment, or set lfs.usercache)
   [255]
@@ -109,7 +109,7 @@
 
 #if osx
   $ unset HOME
-  $ HGRCPATH= hg config lfs --debug
+  $ HGRCPATH= hg config lfs --source
   abort: unknown lfs usercache location
   (define HOME in the environment, or set lfs.usercache)
   [255]
@@ -118,7 +118,7 @@
 #if no-windows no-osx
   $ unset XDG_CACHE_HOME
   $ unset HOME
-  $ HGRCPATH= hg config lfs --debug
+  $ HGRCPATH= hg config lfs --source
   abort: unknown lfs usercache location
   (define XDG_CACHE_HOME or HOME in the environment, or set lfs.usercache)
   [255]
--- a/tests/test-lfs-serve.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-lfs-serve.t	Mon May 17 15:05:24 2021 +0200
@@ -355,11 +355,11 @@
   # LFS required- both lfs and non-lfs revlogs have 0x2000 flag
   *** runcommand debugprocessors lfs.bin -R ../server
   registered processor '0x8000'
-  registered processor '0x800'
+  registered processor '0x1000'
   registered processor '0x2000'
   *** runcommand debugprocessors nonlfs2.txt -R ../server
   registered processor '0x8000'
-  registered processor '0x800'
+  registered processor '0x1000'
   registered processor '0x2000'
   *** runcommand config extensions --cwd ../server
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -368,7 +368,7 @@
   # LFS not enabled- revlogs don't have 0x2000 flag
   *** runcommand debugprocessors nonlfs3.txt
   registered processor '0x8000'
-  registered processor '0x800'
+  registered processor '0x1000'
   *** runcommand config extensions
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
 
@@ -411,11 +411,11 @@
   # LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
   *** runcommand debugprocessors lfs.bin -R ../server
   registered processor '0x8000'
-  registered processor '0x800'
+  registered processor '0x1000'
   registered processor '0x2000'
   *** runcommand debugprocessors nonlfs2.txt -R ../server
   registered processor '0x8000'
-  registered processor '0x800'
+  registered processor '0x1000'
   registered processor '0x2000'
   *** runcommand config extensions --cwd ../server
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -424,7 +424,7 @@
   # LFS enabled without requirement- revlogs have 0x2000 flag
   *** runcommand debugprocessors nonlfs3.txt
   registered processor '0x8000'
-  registered processor '0x800'
+  registered processor '0x1000'
   registered processor '0x2000'
   *** runcommand config extensions
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -433,7 +433,7 @@
   # LFS disabled locally- revlogs don't have 0x2000 flag
   *** runcommand debugprocessors nonlfs.txt -R ../nonlfs
   registered processor '0x8000'
-  registered processor '0x800'
+  registered processor '0x1000'
   *** runcommand config extensions --cwd ../nonlfs
   extensions.debugprocessors=$TESTTMP/debugprocessors.py
   extensions.lfs=!
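
Every '0x800' in the captured debugprocessors output becomes '0x1000': one of the per-revision flag bits registered by the test extension moved up a position. The constant behind it is not visible in this diff, so the first name below is an assumption; the flags are single bits so they can be OR-ed into and tested against a revision's flag field:

    # REVIDX_MOVED_BIT is a placeholder name; REVIDX_EXTSTORED also appears
    # in the test-revlog-raw.py hunks below, and 0x2000 matches the LFS flag
    # these test comments mention.
    REVIDX_MOVED_BIT = 1 << 12   # 0x1000, the value the output now reports
    REVIDX_EXTSTORED = 1 << 13   # 0x2000, revision data stored externally (LFS)

    flags = REVIDX_MOVED_BIT | REVIDX_EXTSTORED
    assert hex(REVIDX_MOVED_BIT) == "0x1000"
    assert flags & REVIDX_EXTSTORED  # the "LFS required" cases have this bit
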
--- a/tests/test-lfs.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-lfs.t	Mon May 17 15:05:24 2021 +0200
@@ -785,8 +785,8 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -895,9 +895,9 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
   checked 5 changesets with 10 changes to 4 files
@@ -939,8 +939,8 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -965,9 +965,9 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l.i:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large.i:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
   checked 5 changesets with 10 changes to 4 files
@@ -985,7 +985,7 @@
 Accessing a corrupt file will complain
 
   $ hg --cwd fromcorrupt2 cat -r 0 large
-  abort: integrity check failed on data/large.i:0
+  abort: integrity check failed on data/large:0
   [50]
 
 lfs -> normal -> lfs round trip conversions are possible.  The 'none()'
--- a/tests/test-manifest.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-manifest.py	Mon May 17 15:05:24 2021 +0200
@@ -81,12 +81,12 @@
         raise NotImplementedError('parsemanifest not implemented by test case')
 
     def testEmptyManifest(self):
-        m = self.parsemanifest(EMTPY_MANIFEST)
+        m = self.parsemanifest(20, EMTPY_MANIFEST)
         self.assertEqual(0, len(m))
         self.assertEqual([], list(m))
 
     def testManifest(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
         self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
         self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
@@ -95,20 +95,16 @@
         with self.assertRaises(KeyError):
             m[b'wat']
 
-    def testManifestLongHashes(self):
-        m = self.parsemanifest(b'a\0' + b'f' * 64 + b'\n')
-        self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
-
     def testSetItem(self):
         want = BIN_HASH_1
 
-        m = self.parsemanifest(EMTPY_MANIFEST)
+        m = self.parsemanifest(20, EMTPY_MANIFEST)
         m[b'a'] = want
         self.assertIn(b'a', m)
         self.assertEqual(want, m[b'a'])
         self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
 
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         m[b'a'] = want
         self.assertEqual(want, m[b'a'])
         self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
@@ -116,14 +112,14 @@
     def testSetFlag(self):
         want = b'x'
 
-        m = self.parsemanifest(EMTPY_MANIFEST)
+        m = self.parsemanifest(20, EMTPY_MANIFEST)
         # first add a file; a file-less flag makes no sense
         m[b'a'] = BIN_HASH_1
         m.setflag(b'a', want)
         self.assertEqual(want, m.flags(b'a'))
         self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
 
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         # first add a file; a file-less flag makes no sense
         m[b'a'] = BIN_HASH_1
         m.setflag(b'a', want)
@@ -133,7 +129,7 @@
         )
 
     def testCopy(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         m[b'a'] = BIN_HASH_1
         m2 = m.copy()
         del m
@@ -142,7 +138,7 @@
     def testCompaction(self):
         unhex = binascii.unhexlify
         h1, h2 = unhex(HASH_1), unhex(HASH_2)
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         m[b'alpha'] = h1
         m[b'beta'] = h2
         del m[b'foo']
@@ -164,7 +160,7 @@
             m[b'foo']
 
     def testMatchException(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
 
         def filt(path):
@@ -177,7 +173,7 @@
             m._matches(match)
 
     def testRemoveItem(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         del m[b'foo']
         with self.assertRaises(KeyError):
             m[b'foo']
@@ -193,9 +189,9 @@
         addl = b'z-only-in-left\0' + HASH_1 + b'\n'
         addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
         left = self.parsemanifest(
-            A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
+            20, A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
         )
-        right = self.parsemanifest(A_SHORT_MANIFEST + addr)
+        right = self.parsemanifest(20, A_SHORT_MANIFEST + addr)
         want = {
             b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
             b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
@@ -208,14 +204,18 @@
             b'foo': (MISSING, (BIN_HASH_3, b'x')),
             b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
         }
-        self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
+        self.assertEqual(
+            want, self.parsemanifest(20, EMTPY_MANIFEST).diff(left)
+        )
 
         want = {
             b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
             b'foo': ((BIN_HASH_3, b'x'), MISSING),
             b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
         }
-        self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
+        self.assertEqual(
+            want, left.diff(self.parsemanifest(20, EMTPY_MANIFEST))
+        )
         copy = right.copy()
         del copy[b'z-only-in-right']
         del right[b'foo']
@@ -225,7 +225,7 @@
         }
         self.assertEqual(want, right.diff(copy))
 
-        short = self.parsemanifest(A_SHORT_MANIFEST)
+        short = self.parsemanifest(20, A_SHORT_MANIFEST)
         pruned = short.copy()
         del pruned[b'foo']
         want = {
@@ -247,27 +247,27 @@
             l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
         )
         try:
-            self.parsemanifest(backwards)
+            self.parsemanifest(20, backwards)
             self.fail('Should have raised ValueError')
         except ValueError as v:
             self.assertIn('Manifest lines not in sorted order.', str(v))
 
     def testNoTerminalNewline(self):
         try:
-            self.parsemanifest(A_SHORT_MANIFEST + b'wat')
+            self.parsemanifest(20, A_SHORT_MANIFEST + b'wat')
             self.fail('Should have raised ValueError')
         except ValueError as v:
             self.assertIn('Manifest did not end in a newline.', str(v))
 
     def testNoNewLineAtAll(self):
         try:
-            self.parsemanifest(b'wat')
+            self.parsemanifest(20, b'wat')
             self.fail('Should have raised ValueError')
         except ValueError as v:
             self.assertIn('Manifest did not end in a newline.', str(v))
 
     def testHugeManifest(self):
-        m = self.parsemanifest(A_HUGE_MANIFEST)
+        m = self.parsemanifest(20, A_HUGE_MANIFEST)
         self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
         self.assertEqual(len(m), len(list(m)))
 
@@ -275,7 +275,7 @@
         """Tests matches() for a few specific files to make sure that both
         the set of files as well as their flags and nodeids are correct in
         the resulting manifest."""
-        m = self.parsemanifest(A_HUGE_MANIFEST)
+        m = self.parsemanifest(20, A_HUGE_MANIFEST)
 
         match = matchmod.exact([b'file1', b'file200', b'file300'])
         m2 = m._matches(match)
@@ -291,7 +291,7 @@
         """Tests matches() for a small set of specific files, including one
         nonexistent file to make sure it only matches against existing files.
         """
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.exact(
             [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
@@ -305,7 +305,7 @@
     def testMatchesNonexistentDirectory(self):
         """Tests matches() for a relpath match on a directory that doesn't
         actually exist."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(
             util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
@@ -316,7 +316,7 @@
 
     def testMatchesExactLarge(self):
         """Tests matches() for files matching a large list of exact files."""
-        m = self.parsemanifest(A_HUGE_MANIFEST)
+        m = self.parsemanifest(20, A_HUGE_MANIFEST)
 
         flist = m.keys()[80:300]
         match = matchmod.exact(flist)
@@ -326,7 +326,7 @@
 
     def testMatchesFull(self):
         '''Tests matches() for what should be a full match.'''
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
         m2 = m._matches(match)
@@ -336,7 +336,7 @@
     def testMatchesDirectory(self):
         """Tests matches() on a relpath match on a directory, which should
         match against all files within said directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(
             util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
@@ -362,7 +362,7 @@
         """Tests matches() on an exact match on a directory, which should
         result in an empty manifest because you can't perform an exact match
         against a directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.exact([b'a/b'])
         m2 = m._matches(match)
@@ -372,7 +372,7 @@
     def testMatchesCwd(self):
         """Tests matches() on a relpath match with the current directory ('.')
         when not in the root directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(
             util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
@@ -397,7 +397,7 @@
     def testMatchesWithPattern(self):
         """Tests matches() for files matching a pattern that reside
         deeper than the specified directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
         m2 = m._matches(match)
@@ -408,8 +408,12 @@
 
 
 class testmanifestdict(unittest.TestCase, basemanifesttests):
-    def parsemanifest(self, text):
-        return manifestmod.manifestdict(text)
+    def parsemanifest(self, nodelen, text):
+        return manifestmod.manifestdict(nodelen, text)
+
+    def testManifestLongHashes(self):
+        m = self.parsemanifest(32, b'a\0' + b'f' * 64 + b'\n')
+        self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
 
     def testObviouslyBogusManifest(self):
         # This is a 163k manifest that came from oss-fuzz. It was a
@@ -433,15 +437,15 @@
             b'\xac\xbe'
         )
         with self.assertRaises(ValueError):
-            self.parsemanifest(data)
+            self.parsemanifest(20, data)
 
 
 class testtreemanifest(unittest.TestCase, basemanifesttests):
-    def parsemanifest(self, text):
+    def parsemanifest(self, nodelen, text):
         return manifestmod.treemanifest(sha1nodeconstants, b'', text)
 
     def testWalkSubtrees(self):
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         dirs = [s._dir for s in m.walksubtrees()]
         self.assertEqual(
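
The test-manifest.py churn comes from parsemanifest() growing a leading nodelen argument: callers must now say whether the manifest carries 20-byte SHA-1 nodes or 32-byte ones, which is also why testManifestLongHashes moves into the manifestdict-specific class, where a node length of 32 can be passed explicitly. A standalone sketch of why the parser needs that hint up front; parse_line below is illustrative, not Mercurial's actual parser:

    import binascii

    def parse_line(line, nodelen):
        # A manifest line is b"<path>\0<hex node>[<flags>]"; node and flags
        # can only be split apart if the node length is known in advance.
        path, rest = line.split(b"\0", 1)
        hexnode = rest[: nodelen * 2]  # 40 hex digits for SHA-1, 64 for 32-byte nodes
        flags = rest[nodelen * 2 :]    # e.g. b"l" (link) or b"x" (executable)
        return path, binascii.unhexlify(hexnode), flags

    # 20-byte (SHA-1) node with an "l" flag, as in A_SHORT_MANIFEST:
    print(parse_line(b"bar/baz/qux.py\0" + b"a" * 40 + b"l", 20))
    # 32-byte node, the case testManifestLongHashes now exercises:
    print(parse_line(b"a\0" + b"f" * 64, 32))
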
--- a/tests/test-merge-subrepos.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-merge-subrepos.t	Mon May 17 15:05:24 2021 +0200
@@ -61,7 +61,7 @@
   > --config blackbox.track='command commandfinish'
   9bfe45a197d7+ tip
   $ cat .hg/blackbox.log
-  * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --cmdserver chgunix * (glob) (chg !)
+  * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
   * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* (glob)
   * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* exited 0 * (glob)
 
--- a/tests/test-narrow-clone-non-narrow-server.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-narrow-clone-non-narrow-server.t	Mon May 17 15:05:24 2021 +0200
@@ -57,6 +57,7 @@
   comparing with http://localhost:$HGPORT1/
   searching for changes
   looking for local changes to affected paths
+  deleting unwanted files from working copy
 
   $ hg tracked --addinclude f1 http://localhost:$HGPORT1/
   nothing to widen or narrow
--- a/tests/test-narrow-exchange.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-narrow-exchange.t	Mon May 17 15:05:24 2021 +0200
@@ -105,7 +105,7 @@
   remote: adding file changes
   remote: transaction abort!
   remote: rollback completed
-  remote: abort: data/inside2/f.i@4a1aa07735e673e20c00fae80f40dc301ee30616: unknown parent (reporevlogstore !)
+  remote: abort: data/inside2/f@4a1aa07735e673e20c00fae80f40dc301ee30616: unknown parent (reporevlogstore !)
   remote: abort: data/inside2/f/index@4a1aa07735e6: no node (reposimplestore !)
   abort: stream ended unexpectedly (got 0 bytes, expected 4)
   [255]
@@ -218,8 +218,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
-  remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+  remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   remote: transaction abort! (lfs-on !)
   remote: rollback completed (lfs-on !)
-  remote: abort: data/inside2/f.i@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+  remote: abort: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
--- a/tests/test-narrow-patterns.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-narrow-patterns.t	Mon May 17 15:05:24 2021 +0200
@@ -193,6 +193,7 @@
   deleting data/dir1/dirA/bar.i (reporevlogstore !)
   deleting data/dir1/dirA/bar/0eca1d0cbdaea4651d1d04d71976a6d2d9bfaae5 (reposimplestore !)
   deleting data/dir1/dirA/bar/index (reposimplestore !)
+  deleting unwanted files from working copy
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
@@ -249,6 +250,7 @@
   deleting data/dir1/dirA/foo.i (reporevlogstore !)
   deleting data/dir1/dirA/foo/162caeb3d55dceb1fee793aa631ac8c73fcb8b5e (reposimplestore !)
   deleting data/dir1/dirA/foo/index (reposimplestore !)
+  deleting unwanted files from working copy
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
--- a/tests/test-narrow-pull.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-narrow-pull.t	Mon May 17 15:05:24 2021 +0200
@@ -147,7 +147,7 @@
   $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
   $ cd narrow2
   $ hg pull -q -r 1
-  remote: abort: unexpected error: unable to resolve parent while packing '00manifest.i' 1 for changeset 0
+  remote: abort: unexpected error: unable to resolve parent while packing '00manifest' 1 for changeset 0
   transaction abort!
   rollback completed
   abort: pull failed on remote
--- a/tests/test-narrow-share.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-narrow-share.t	Mon May 17 15:05:24 2021 +0200
@@ -94,6 +94,7 @@
   deleting meta/d1/00manifest.i (tree !)
   deleting meta/d3/00manifest.i (tree !)
   deleting meta/d5/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ hg -R main tracked
   I path:d7
   $ hg -R main files
--- a/tests/test-narrow-trackedcmd.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-narrow-trackedcmd.t	Mon May 17 15:05:24 2021 +0200
@@ -150,6 +150,7 @@
   looking for local changes to affected paths
   deleting data/inside/f.i
   deleting meta/inside/00manifest.i (tree !)
+  deleting unwanted files from working copy
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
@@ -191,6 +192,7 @@
   looking for local changes to affected paths
   deleting data/widest/f.i
   deleting meta/widest/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:outisde
   I path:wider
--- a/tests/test-narrow.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-narrow.t	Mon May 17 15:05:24 2021 +0200
@@ -132,12 +132,14 @@
   looking for local changes to affected paths
   The following changeset(s) or their ancestors have local changes not on the remote:
   * (glob)
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d0/f.i (reporevlogstore !)
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
 
   $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
   7: local change to d3 
@@ -164,12 +166,14 @@
   comparing with ssh://user@dummy/master
   searching for changes
   looking for local changes to affected paths
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d0/f.i (reporevlogstore !)
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
 
 Updates off of stripped commit if necessary
   $ hg co -r 'desc("local change to d3")' -q
@@ -183,12 +187,14 @@
   * (glob)
   * (glob)
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d3/f.i (reporevlogstore !)
   deleting meta/d3/00manifest.i (tree !)
   deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
   deleting data/d3/f/99fa7136105a15e2045ce3d9152e4837c5349e4d (reposimplestore !)
   deleting data/d3/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg log -T '{desc}\n' -r .
   add d10/f
 Updates to nullid if necessary
@@ -206,12 +212,14 @@
   The following changeset(s) or their ancestors have local changes not on the remote:
   * (glob)
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d3/f.i (reporevlogstore !)
   deleting meta/d3/00manifest.i (tree !)
   deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
   deleting data/d3/f/5ce0767945cbdbca3b924bb9fbf5143f72ab40ac (reposimplestore !)
   deleting data/d3/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg id
   000000000000
   $ cd ..
@@ -272,6 +280,7 @@
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   $ hg files
   [1]
@@ -332,6 +341,7 @@
   deleting meta/d6/00manifest.i (tree !)
   deleting data/d6/f/7339d30678f451ac8c3f38753beeb4cf2e1655c7 (reposimplestore !)
   deleting data/d6/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d0
   I path:d3
@@ -355,6 +365,7 @@
   deleting data/d3/f.i (reporevlogstore !)
   deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
   deleting data/d3/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d0
   I path:d3
@@ -378,6 +389,7 @@
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d3
   I path:d9
@@ -478,11 +490,13 @@
   path:d2
   remove these unused includes (yn)? y
   looking for local changes to affected paths
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-auto-remove/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d0/f.i
   deleting data/d2/f.i
   deleting meta/d0/00manifest.i (tree !)
   deleting meta/d2/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d1
   $ hg files
@@ -504,10 +518,12 @@
   path:d2
   remove these unused includes (yn)? y
   looking for local changes to affected paths
+  deleting unwanted changesets
   deleting data/d0/f.i
   deleting data/d2/f.i
   deleting meta/d0/00manifest.i (tree !)
   deleting meta/d2/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ ls .hg/strip-backup/
 
 
@@ -521,4 +537,5 @@
   looking for local changes to affected paths
   deleting data/d0/f.i
   deleting meta/d0/00manifest.i (tree !)
+  deleting unwanted files from working copy
   not deleting possibly dirty file d0/f
--- a/tests/test-obshistory.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-obshistory.t	Mon May 17 15:05:24 2021 +0200
@@ -13,6 +13,7 @@
   > [experimental]
   > evolution.createmarkers = yes
   > evolution.effect-flags = yes
+  > evolution.allowdivergence=true
   > EOF
 
 Test output on amended commit
--- a/tests/test-obsmarker-template.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-obsmarker-template.t	Mon May 17 15:05:24 2021 +0200
@@ -11,6 +11,7 @@
   > publish=False
   > [experimental]
   > evolution=true
+  > evolution.allowdivergence=true
   > [templates]
   > obsfatesuccessors = "{if(successors, " as ")}{join(successors, ", ")}"
   > obsfateverb = "{obsfateverb(successors, markers)}"
--- a/tests/test-parseindex2.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-parseindex2.py	Mon May 17 15:05:24 2021 +0200
@@ -14,8 +14,8 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     nullrev,
+    sha1nodeconstants,
 )
 from mercurial import (
     policy,
@@ -40,7 +40,7 @@
     s = 64
     cache = None
     index = []
-    nodemap = {nullid: nullrev}
+    nodemap = {sha1nodeconstants.nullid: nullrev}
     n = off = 0
 
     l = len(data) - s
@@ -49,6 +49,7 @@
         cache = (0, data)
         while off <= l:
             e = struct.unpack(indexformatng, data[off : off + s])
+            e = e + (0, 0)
             nodemap[e[7]] = n
             append(e)
             n += 1
@@ -58,6 +59,7 @@
     else:
         while off <= l:
             e = struct.unpack(indexformatng, data[off : off + s])
+            e = e + (0, 0)
             nodemap[e[7]] = n
             append(e)
             n += 1
@@ -227,7 +229,7 @@
 
         ix = parsers.parse_index2(data_inlined, True)[0]
         for i, r in enumerate(ix):
-            if r[7] == nullid:
+            if r[7] == sha1nodeconstants.nullid:
                 i = -1
             try:
                 self.assertEqual(
@@ -240,7 +242,7 @@
                 break
 
     def testminusone(self):
-        want = (0, 0, 0, -1, -1, -1, -1, nullid)
+        want = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)
         index, junk = parsers.parse_index2(data_inlined, True)
         got = index[-1]
         self.assertEqual(want, got)  # inline data
@@ -262,7 +264,7 @@
             # node won't matter for this test, let's just make sure
             # they don't collide. Other data don't matter either.
             node = hexrev(p1) + hexrev(p2) + b'.' * 12
-            index.append((0, 0, 12, 1, 34, p1, p2, node))
+            index.append((0, 0, 12, 1, 34, p1, p2, node, 0, 0))
 
         appendrev(4)
         appendrev(5)
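
The pure-Python reference parser in test-parseindex2.py pads every unpacked v1 index entry with two zero fields so its tuples line up with the wider entries the updated parsers return (testminusone shows the same 10-field shape), and the null node is now reached through sha1nodeconstants instead of a bare nullid import. A stdlib-only sketch of the padding, assuming the v1 entry format ">Qiiiiii20s12x" (64 bytes, matching `s = 64` above); the meaning of the two new fields is not visible in this diff, so zeros stand in:

    import struct

    indexformatng = ">Qiiiiii20s12x"  # 64-byte v1 revlog index entry

    raw = struct.pack(indexformatng, 0, 0, 0, -1, -1, -1, -1, b"\0" * 20)
    e = struct.unpack(indexformatng, raw)  # 8 fields out of the v1 entry
    e = e + (0, 0)  # pad to the 10-field tuple newer parsers return
    assert len(e) == 10
    assert e[7] == b"\0" * 20  # e[7] is the node, keyed into nodemap above
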
--- a/tests/test-paths.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-paths.t	Mon May 17 15:05:24 2021 +0200
@@ -98,6 +98,9 @@
   expand: $TESTTMP/a/$SOMETHING/bar
   $ hg log -rnull -T '{get(peerurls, "dupe")}\n'
   $TESTTMP/b#tip
+  $ hg log -rnull -T '{peerurls % "{urls|json}\n"}'
+  [{"pushurl": "https://example.com/dupe", "url": "$TESTTMP/b#tip"}]
+  [{"url": "$TESTTMP/a/$SOMETHING/bar"}]
 
  (sub options can be populated by map/dot operation)
 
--- a/tests/test-permissions.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-permissions.t	Mon May 17 15:05:24 2021 +0200
@@ -1,5 +1,13 @@
 #require unix-permissions no-root reporevlogstore
 
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
   $ hg init t
   $ cd t
 
--- a/tests/test-phabricator.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-phabricator.t	Mon May 17 15:05:24 2021 +0200
@@ -509,9 +509,8 @@
   
 A bad .arcconfig doesn't error out
   $ echo 'garbage' > .arcconfig
-  $ hg config phabricator --debug
+  $ hg config phabricator --source
   invalid JSON in $TESTTMP/repo/.arcconfig
-  read config from: */.hgrc (glob)
   */.hgrc:*: phabricator.debug=True (glob)
   $TESTTMP/repo/.hg/hgrc:*: phabricator.url=https://phab.mercurial-scm.org/ (glob)
   $TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=HG (glob)
@@ -524,8 +523,7 @@
   > EOF
   $ cp $TESTDIR/../.arcconfig .
   $ mv .hg/hgrc .hg/hgrc.bak
-  $ hg config phabricator --debug
-  read config from: */.hgrc (glob)
+  $ hg config phabricator --source
   */.hgrc:*: phabricator.debug=True (glob)
   $TESTTMP/repo/.arcconfig: phabricator.callsign=HG
   $TESTTMP/repo/.arcconfig: phabricator.url=https://phab.mercurial-scm.org/
@@ -536,8 +534,7 @@
   > url = local
   > callsign = local
   > EOF
-  $ hg config phabricator --debug
-  read config from: */.hgrc (glob)
+  $ hg config phabricator --source
   */.hgrc:*: phabricator.debug=True (glob)
   $TESTTMP/repo/.hg/hgrc:*: phabricator.url=local (glob)
   $TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=local (glob)
--- a/tests/test-pull-bundle.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-pull-bundle.t	Mon May 17 15:05:24 2021 +0200
@@ -185,7 +185,7 @@
   adding changesets
   adding manifests
   adding file changes
-  abort: 00changelog.i@66f7d451a68b85ed82ff5fcc254daf50c74144bd: no node
+  abort: 00changelog@66f7d451a68b85ed82ff5fcc254daf50c74144bd: no node
   [50]
   $ cd ..
   $ killdaemons.py
--- a/tests/test-purge.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-purge.t	Mon May 17 15:05:24 2021 +0200
@@ -1,3 +1,11 @@
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
 init
 
   $ hg init t
--- a/tests/test-racy-mutations.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-racy-mutations.t	Mon May 17 15:05:24 2021 +0200
@@ -91,7 +91,7 @@
   $ hg debugrevlogindex -c
      rev linkrev nodeid       p1           p2
        0       0 222799e2f90b 000000000000 000000000000
-       1       1 6f124f6007a0 222799e2f90b 000000000000
+       1       1 6f124f6007a0 222799e2f90b 000000000000 (missing-correct-output !)
 And, because of transactions, there's none in the manifestlog either.
   $ hg debugrevlogindex -m
      rev linkrev nodeid       p1           p2
--- a/tests/test-rebase-collapse.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-rebase-collapse.t	Mon May 17 15:05:24 2021 +0200
@@ -549,8 +549,8 @@
   o  0: f447d5abf5ea 'add'
   
   $ hg rebase --collapse -r 1 -d 0
-  abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  abort: cannot rebase changeset, as that will orphan 1 descendants
+  (see 'hg help evolution.instability')
   [10]
 
 Test collapsing in place
--- a/tests/test-rebase-scenario-global.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-rebase-scenario-global.t	Mon May 17 15:05:24 2021 +0200
@@ -328,11 +328,11 @@
   nothing to rebase
   [1]
   $ hg rebase -d 5 -b 6
-  abort: cannot rebase public changesets
+  abort: cannot rebase public changesets: e1c4361dd923
   (see 'hg help phases' for details)
   [10]
   $ hg rebase -d 5 -r '1 + (6::)'
-  abort: cannot rebase public changesets
+  abort: cannot rebase public changesets: e1c4361dd923
   (see 'hg help phases' for details)
   [10]
 
@@ -452,8 +452,8 @@
   $ hg clone -q -u . ah ah1
   $ cd ah1
   $ hg rebase -r '2::8' -d 1
-  abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  abort: cannot rebase changeset, as that will orphan 2 descendants
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '2::8' -d 1 -k
   rebasing 2:c9e50f6cdc55 "C"
@@ -498,8 +498,8 @@
   $ hg clone -q -u . ah ah2
   $ cd ah2
   $ hg rebase -r '3::8' -d 1
-  abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  abort: cannot rebase changeset, as that will orphan 2 descendants
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '3::8' -d 1 --keep
   rebasing 3:ffd453c31098 "D"
@@ -541,8 +541,8 @@
   $ hg clone -q -u . ah ah3
   $ cd ah3
   $ hg rebase -r '3::7' -d 1
-  abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  abort: cannot rebase changeset, as that will orphan 3 descendants
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '3::7' -d 1 --keep
   rebasing 3:ffd453c31098 "D"
@@ -581,8 +581,8 @@
   $ hg clone -q -u . ah ah4
   $ cd ah4
   $ hg rebase -r '3::(7+5)' -d 1
-  abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  abort: cannot rebase changeset, as that will orphan 1 descendants
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '3::(7+5)' -d 1 --keep
   rebasing 3:ffd453c31098 "D"
--- a/tests/test-remotefilelog-datapack.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-remotefilelog-datapack.py	Mon May 17 15:05:24 2021 +0200
@@ -16,7 +16,7 @@
 
 # Load the local remotefilelog, not the system one
 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
 from mercurial import policy
 
 if not policy._packageprefs.get(policy.policy, (False, False))[1]:
@@ -63,7 +63,14 @@
 
     def createPack(self, revisions=None, packdir=None):
         if revisions is None:
-            revisions = [(b"filename", self.getFakeHash(), nullid, b"content")]
+            revisions = [
+                (
+                    b"filename",
+                    self.getFakeHash(),
+                    sha1nodeconstants.nullid,
+                    b"content",
+                )
+            ]
 
         if packdir is None:
             packdir = self.makeTempDir()
@@ -86,7 +93,7 @@
         filename = b"foo"
         node = self.getHash(content)
 
-        revisions = [(filename, node, nullid, content)]
+        revisions = [(filename, node, sha1nodeconstants.nullid, content)]
         pack = self.createPack(revisions)
         if self.paramsavailable:
             self.assertEqual(
@@ -126,7 +133,7 @@
         """Test putting multiple delta blobs into a pack and read the chain."""
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             content = b"abcdef%d" % i
             node = self.getHash(content)
@@ -157,7 +164,7 @@
             for j in range(random.randint(1, 100)):
                 content = b"content-%d" % j
                 node = self.getHash(content)
-                lastnode = nullid
+                lastnode = sha1nodeconstants.nullid
                 if len(filerevs) > 0:
                     lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
                 filerevs.append(node)
@@ -185,7 +192,9 @@
                 b'Z': b'random_string',
                 b'_': b'\0' * i,
             }
-            revisions.append((filename, node, nullid, content, meta))
+            revisions.append(
+                (filename, node, sha1nodeconstants.nullid, content, meta)
+            )
         pack = self.createPack(revisions)
         for name, node, x, content, origmeta in revisions:
             parsedmeta = pack.getmeta(name, node)
@@ -198,7 +207,7 @@
         """Test the getmissing() api."""
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             content = b"abcdef%d" % i
             node = self.getHash(content)
@@ -225,7 +234,7 @@
         pack = self.createPack()
 
         try:
-            pack.add(b'filename', nullid, b'contents')
+            pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
             self.assertTrue(False, "datapack.add should throw")
         except RuntimeError:
             pass
@@ -264,7 +273,9 @@
             content = filename
             node = self.getHash(content)
             blobs[(filename, node)] = content
-            revisions.append((filename, node, nullid, content))
+            revisions.append(
+                (filename, node, sha1nodeconstants.nullid, content)
+            )
 
         pack = self.createPack(revisions)
         if self.paramsavailable:
@@ -288,7 +299,12 @@
 
         for i in range(numpacks):
             chain = []
-            revision = (b'%d' % i, self.getFakeHash(), nullid, b"content")
+            revision = (
+                b'%d' % i,
+                self.getFakeHash(),
+                sha1nodeconstants.nullid,
+                b"content",
+            )
 
             for _ in range(revisionsperpack):
                 chain.append(revision)
@@ -346,7 +362,9 @@
                 filename = b"filename-%d" % i
                 content = b"content-%d" % i
                 node = self.getHash(content)
-                revisions.append((filename, node, nullid, content))
+                revisions.append(
+                    (filename, node, sha1nodeconstants.nullid, content)
+                )
 
             path = self.createPack(revisions).path
 
--- a/tests/test-remotefilelog-histpack.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-remotefilelog-histpack.py	Mon May 17 15:05:24 2021 +0200
@@ -13,7 +13,7 @@
 
 import silenttestrunner
 
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
 from mercurial import (
     pycompat,
     ui as uimod,
@@ -59,8 +59,8 @@
                 (
                     b"filename",
                     self.getFakeHash(),
-                    nullid,
-                    nullid,
+                    sha1nodeconstants.nullid,
+                    sha1nodeconstants.nullid,
                     self.getFakeHash(),
                     None,
                 )
@@ -119,10 +119,19 @@
         """
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             node = self.getFakeHash()
-            revisions.append((filename, node, lastnode, nullid, nullid, None))
+            revisions.append(
+                (
+                    filename,
+                    node,
+                    lastnode,
+                    sha1nodeconstants.nullid,
+                    sha1nodeconstants.nullid,
+                    None,
+                )
+            )
             lastnode = node
 
         # revisions must be added in topological order, newest first
@@ -148,17 +157,17 @@
         for i in range(100):
             filename = b"filename-%d" % i
             entries = []
-            p2 = nullid
-            linknode = nullid
+            p2 = sha1nodeconstants.nullid
+            linknode = sha1nodeconstants.nullid
             for j in range(random.randint(1, 100)):
                 node = self.getFakeHash()
-                p1 = nullid
+                p1 = sha1nodeconstants.nullid
                 if len(entries) > 0:
                     p1 = entries[random.randint(0, len(entries) - 1)]
                 entries.append(node)
                 revisions.append((filename, node, p1, p2, linknode, None))
                 allentries[(filename, node)] = (p1, p2, linknode)
-                if p1 == nullid:
+                if p1 == sha1nodeconstants.nullid:
                     ancestorcounts[(filename, node)] = 1
                 else:
                     newcount = ancestorcounts[(filename, p1)] + 1
@@ -182,10 +191,19 @@
     def testGetNodeInfo(self):
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             node = self.getFakeHash()
-            revisions.append((filename, node, lastnode, nullid, nullid, None))
+            revisions.append(
+                (
+                    filename,
+                    node,
+                    lastnode,
+                    sha1nodeconstants.nullid,
+                    sha1nodeconstants.nullid,
+                    None,
+                )
+            )
             lastnode = node
 
         pack = self.createPack(revisions)
@@ -233,7 +251,14 @@
         pack = self.createPack()
 
         try:
-            pack.add(b'filename', nullid, nullid, nullid, nullid, None)
+            pack.add(
+                b'filename',
+                sha1nodeconstants.nullid,
+                sha1nodeconstants.nullid,
+                sha1nodeconstants.nullid,
+                sha1nodeconstants.nullid,
+                None,
+            )
             self.assertTrue(False, "historypack.add should throw")
         except RuntimeError:
             pass
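
Both remotefilelog pack tests make the same mechanical swap: the bare nullid import becomes sha1nodeconstants.nullid. Grouping hash-specific constants behind an object means a second hash width can be introduced without another round of module-level renames. A toy version of the idea; the real mercurial.node.sha1nodeconstants carries more fields (nullhex, wdirid, ...):

    class nodeconstants:
        """Hash-width-specific constants, one instance per hash function."""

        def __init__(self, nodelen):
            self.nodelen = nodelen
            self.nullid = b"\0" * nodelen  # the null node at this width

    sha1nodeconstants = nodeconstants(20)
    widehash = nodeconstants(32)  # hypothetical 32-byte hash

    assert sha1nodeconstants.nullid == b"\0" * 20
    assert len(widehash.nullid) == 32
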
--- a/tests/test-remotefilelog-prefetch.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-remotefilelog-prefetch.t	Mon May 17 15:05:24 2021 +0200
@@ -237,6 +237,7 @@
   $ hg mv z2 z3
   z2: not copying - file is not managed
   abort: no files to copy
+  (maybe you meant to use --after --at-rev=.)
   [10]
   $ find $CACHEDIR -type f | sort
.. The following output line about file fetches is globbed because it is
--- a/tests/test-requires.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-requires.t	Mon May 17 15:05:24 2021 +0200
@@ -5,7 +5,7 @@
   $ hg commit -m test
   $ rm .hg/requires
   $ hg tip
-  abort: unknown version (65535) in revlog 00changelog.i
+  abort: unknown version (65535) in revlog 00changelog
   [50]
   $ echo indoor-pool > .hg/requires
   $ hg tip
--- a/tests/test-revlog-raw.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-revlog-raw.py	Mon May 17 15:05:24 2021 +0200
@@ -6,7 +6,6 @@
 import hashlib
 import sys
 
-from mercurial.node import nullid
 from mercurial import (
     encoding,
     revlog,
@@ -15,10 +14,37 @@
 )
 
 from mercurial.revlogutils import (
+    constants,
     deltas,
     flagutil,
 )
 
+
+class _NoTransaction(object):
+    """transaction like object to update the nodemap outside a transaction"""
+
+    def __init__(self):
+        self._postclose = {}
+
+    def addpostclose(self, callback_id, callback_func):
+        self._postclose[callback_id] = callback_func
+
+    def registertmp(self, *args, **kwargs):
+        pass
+
+    def addbackup(self, *args, **kwargs):
+        pass
+
+    def add(self, *args, **kwargs):
+        pass
+
+    def addabort(self, *args, **kwargs):
+        pass
+
+    def _report(self, *args):
+        pass
+
+
 # TESTTMP is optional. This makes it convenient to run without run-tests.py
 tvfs = vfs.vfs(encoding.environ.get(b'TESTTMP', b'/tmp'))
 
@@ -79,10 +105,11 @@
     return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
 
 
-def newrevlog(name=b'_testrevlog.i', recreate=False):
+def newrevlog(name=b'_testrevlog', recreate=False):
     if recreate:
-        tvfs.tryunlink(name)
-    rlog = revlog.revlog(tvfs, name)
+        tvfs.tryunlink(name + b'.i')
+    target = (constants.KIND_OTHER, b'test')
+    rlog = revlog.revlog(tvfs, target=target, radix=name)
     return rlog
 
 
@@ -93,7 +120,7 @@
     """
     nextrev = len(rlog)
     p1 = rlog.node(nextrev - 1)
-    p2 = nullid
+    p2 = rlog.nullid
     if isext:
         flags = revlog.REVIDX_EXTSTORED
     else:
@@ -110,7 +137,7 @@
         rlog._storedeltachains = True
 
 
-def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
+def addgroupcopy(rlog, tr, destname=b'_destrevlog', optimaldelta=True):
     """Copy revlog to destname using revlog.addgroup. Return the copied revlog.
 
     This emulates push or pull. They use changegroup. Changegroup requires
@@ -127,7 +154,7 @@
     class dummychangegroup(object):
         @staticmethod
         def deltachunk(pnode):
-            pnode = pnode or nullid
+            pnode = pnode or rlog.nullid
             parentrev = rlog.rev(pnode)
             r = parentrev + 1
             if r >= len(rlog):
@@ -142,7 +169,7 @@
             return {
                 b'node': rlog.node(r),
                 b'p1': pnode,
-                b'p2': nullid,
+                b'p2': rlog.nullid,
                 b'cs': rlog.node(rlog.linkrev(r)),
                 b'flags': rlog.flags(r),
                 b'deltabase': rlog.node(deltaparent),
@@ -175,7 +202,7 @@
     return dlog
 
 
-def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
+def lowlevelcopy(rlog, tr, destname=b'_destrevlog'):
     """Like addgroupcopy, but use the low level revlog._addrevision directly.
 
     It exercises some code paths that are hard to reach easily otherwise.
@@ -183,7 +210,7 @@
     dlog = newrevlog(destname, recreate=True)
     for r in rlog:
         p1 = rlog.node(r - 1)
-        p2 = nullid
+        p2 = rlog.nullid
         if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
             text = rlog.rawdata(r)
             cachedelta = None
@@ -200,19 +227,17 @@
             text = None
             cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
         flags = rlog.flags(r)
-        ifh = dfh = None
-        try:
-            ifh = dlog.opener(dlog.indexfile, b'a+')
-            if not dlog._inline:
-                dfh = dlog.opener(dlog.datafile, b'a+')
+        with dlog._writing(_NoTransaction()):
             dlog._addrevision(
-                rlog.node(r), text, tr, r, p1, p2, flags, cachedelta, ifh, dfh
+                rlog.node(r),
+                text,
+                tr,
+                r,
+                p1,
+                p2,
+                flags,
+                cachedelta,
             )
-        finally:
-            if dfh is not None:
-                dfh.close()
-            if ifh is not None:
-                ifh.close()
     return dlog
 
 
@@ -425,7 +450,7 @@
 
 
 def makesnapshot(tr):
-    rl = newrevlog(name=b'_snaprevlog3.i', recreate=True)
+    rl = newrevlog(name=b'_snaprevlog3', recreate=True)
     for i in data:
         appendrev(rl, i, tr)
     return rl
@@ -481,7 +506,7 @@
         checkrevlog(rl2, expected)
         print('addgroupcopy test passed')
         # Copy via revlog.clone
-        rl3 = newrevlog(name=b'_destrevlog3.i', recreate=True)
+        rl3 = newrevlog(name=b'_destrevlog3', recreate=True)
         rl.clone(tr, rl3)
         checkrevlog(rl3, expected)
         print('clone test passed')
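
Two API shifts drive the test-revlog-raw.py rewrite: revlogs are constructed from a (kind, name) target plus a radix instead of an explicit '.i' index filename, and low-level writes now happen inside a revlog._writing(tr) context manager (hence the _NoTransaction stub) rather than with hand-opened index and data file handles passed into _addrevision(). A toy sketch of that context-manager shape; MiniRevlog is illustrative, not Mercurial's revlog:

    import contextlib
    import io

    class MiniRevlog:
        def __init__(self):
            self._writinghandle = None
            self.store = io.BytesIO()  # stands in for the on-disk revlog

        @contextlib.contextmanager
        def _writing(self, transaction):
            # The real revlog opens (and later closes) its index/data
            # files here, so callers never juggle file handles.
            self._writinghandle = self.store
            try:
                yield self
            finally:
                self._writinghandle = None

        def _addrevision(self, data):
            assert self._writinghandle is not None, "call inside _writing()"
            self._writinghandle.write(data)

    rl = MiniRevlog()
    with rl._writing(transaction=None):  # the test passes a _NoTransaction()
        rl._addrevision(b"revision payload")
    assert rl.store.getvalue() == b"revision payload"
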
--- a/tests/test-revlog-v2.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-revlog-v2.t	Mon May 17 15:05:24 2021 +0200
@@ -24,6 +24,7 @@
   dotencode
   exp-revlogv2.2
   fncache
+  generaldelta
   persistent-nodemap (rust !)
   revlog-compression-zstd (zstd !)
   sparserevlog
@@ -37,7 +38,7 @@
   ...     fh.write(b'\xff\x00\xde\xad') and None
 
   $ hg log
-  abort: unknown flags (0xff00) in version 57005 revlog 00changelog.i
+  abort: unknown flags (0xff00) in version 57005 revlog 00changelog
   [50]
 
   $ cd ..
--- a/tests/test-revlog.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-revlog.t	Mon May 17 15:05:24 2021 +0200
@@ -7,7 +7,7 @@
   ...     fh.write(b'\x00\x01\x00\x00') and None
 
   $ hg log
-  abort: unknown flags (0x01) in version 0 revlog 00changelog.i
+  abort: unknown flags (0x01) in version 0 revlog 00changelog
   [50]
 
 Unknown flags on revlog version 1 are rejected
@@ -16,7 +16,7 @@
   ...     fh.write(b'\x00\x04\x00\x01') and None
 
   $ hg log
-  abort: unknown flags (0x04) in version 1 revlog 00changelog.i
+  abort: unknown flags (0x04) in version 1 revlog 00changelog
   [50]
 
 Unknown version is rejected
@@ -25,7 +25,7 @@
   ...     fh.write(b'\x00\x00\xbe\xef') and None
 
   $ hg log
-  abort: unknown version (48879) in revlog 00changelog.i
+  abort: unknown version (48879) in revlog 00changelog
   [50]
 
   $ cd ..
@@ -45,9 +45,10 @@
        0       2 99e0332bd498 000000000000 000000000000
        1       3 6674f57a23d8 99e0332bd498 000000000000
 
+  >>> from mercurial.revlogutils.constants import KIND_OTHER
   >>> from mercurial import revlog, vfs
   >>> tvfs = vfs.vfs(b'.')
   >>> tvfs.options = {b'revlogv1': True}
-  >>> rl = revlog.revlog(tvfs, b'a.i')
+  >>> rl = revlog.revlog(tvfs, target=(KIND_OTHER, b'test'), radix=b'a')
   >>> rl.revision(1)
   mpatchError(*'patch cannot be decoded'*) (glob)
--- a/tests/test-setdiscovery.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-setdiscovery.t	Mon May 17 15:05:24 2021 +0200
@@ -1536,7 +1536,7 @@
   searching for changes
   101 102 103 104 105 106 107 108 109 110  (no-eol)
   $ hg -R r1 --config extensions.blackbox= blackbox --config blackbox.track=
-  * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --cmdserver chgunix * (glob) (chg !)
+  * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
   * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* (glob)
   * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> found 101 common and 1 unknown server heads, 1 roundtrips in *.????s (glob)
   * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* exited 0 after *.?? seconds (glob)
--- a/tests/test-sidedata-exchange.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-sidedata-exchange.t	Mon May 17 15:05:24 2021 +0200
@@ -8,12 +8,12 @@
 Pusher and pushed have sidedata enabled
 ---------------------------------------
 
-  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> sidedata-source/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
   > EOF
-  $ hg init sidedata-target --config format.exp-use-side-data=yes
+  $ hg init sidedata-target --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> sidedata-target/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
@@ -71,12 +71,12 @@
 ---------------------------------------
 
   $ rm -rf sidedata-source sidedata-target
-  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> sidedata-source/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
   > EOF
-  $ hg init sidedata-target --config format.exp-use-side-data=yes
+  $ hg init sidedata-target --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> sidedata-target/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
@@ -138,12 +138,12 @@
 --------------------------------------------
 
   $ rm -rf sidedata-source sidedata-target
-  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> sidedata-source/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
   > EOF
-  $ hg init sidedata-target --config format.exp-use-side-data=no
+  $ hg init sidedata-target --config experimental.revlogv2=no
   $ cd sidedata-source
   $ echo a > a
   $ echo b > b
@@ -186,12 +186,12 @@
 --------------------------------------------
 
   $ rm -rf sidedata-source sidedata-target
-  $ hg init sidedata-source --config format.exp-use-side-data=yes
+  $ hg init sidedata-source --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> sidedata-source/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata-5.py
   > EOF
-  $ hg init sidedata-target --config format.exp-use-side-data=no
+  $ hg init sidedata-target --config experimental.revlogv2=no
   $ cd sidedata-source
   $ echo a > a
   $ echo b > b
@@ -239,8 +239,8 @@
 (Push) Target has strict superset of the source
 -----------------------------------------------
 
-  $ hg init source-repo --config format.exp-use-side-data=yes
-  $ hg init target-repo --config format.exp-use-side-data=yes
+  $ hg init source-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
+  $ hg init target-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> target-repo/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata.py
@@ -311,12 +311,12 @@
 target.
 
   $ rm -rf source-repo target-repo
-  $ hg init source-repo --config format.exp-use-side-data=yes
+  $ hg init source-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> source-repo/.hg/hgrc
   > [extensions]
   > testsidedata3=$TESTDIR/testlib/ext-sidedata-3.py
   > EOF
-  $ hg init target-repo --config format.exp-use-side-data=yes
+  $ hg init target-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> target-repo/.hg/hgrc
   > [extensions]
   > testsidedata4=$TESTDIR/testlib/ext-sidedata-4.py
@@ -412,8 +412,8 @@
 -----------------------------------------------
 
   $ rm -rf source-repo target-repo
-  $ hg init source-repo --config format.exp-use-side-data=yes
-  $ hg init target-repo --config format.exp-use-side-data=yes
+  $ hg init source-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
+  $ hg init target-repo --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cat << EOF >> target-repo/.hg/hgrc
   > [extensions]
   > testsidedata=$TESTDIR/testlib/ext-sidedata.py
--- a/tests/test-sidedata.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-sidedata.t	Mon May 17 15:05:24 2021 +0200
@@ -10,7 +10,7 @@
   > testsidedata=$TESTDIR/testlib/ext-sidedata.py
   > EOF
 
-  $ hg init test-sidedata --config format.exp-use-side-data=yes
+  $ hg init test-sidedata --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ cd test-sidedata
   $ echo aaa > a
   $ hg add a
@@ -48,7 +48,7 @@
 Check that we can upgrade to sidedata
 -------------------------------------
 
-  $ hg init up-no-side-data --config format.exp-use-side-data=no
+  $ hg init up-no-side-data --config experimental.revlogv2=no
   $ hg debugformat -v -R up-no-side-data
   format-variant     repo config default
   fncache:            yes    yes     yes
@@ -64,7 +64,7 @@
   compression:        zlib   zlib    zlib (no-zstd !)
   compression:        zstd   zstd    zstd (zstd !)
   compression-level:  default default default
-  $ hg debugformat -v -R up-no-side-data --config format.exp-use-side-data=yes
+  $ hg debugformat -v -R up-no-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   format-variant     repo config default
   fncache:            yes    yes     yes
   dotencode:          yes    yes     yes
@@ -79,12 +79,12 @@
   compression:        zlib   zlib    zlib (no-zstd !)
   compression:        zstd   zstd    zstd (zstd !)
   compression-level:  default default default
-  $ hg debugupgraderepo -R up-no-side-data --config format.exp-use-side-data=yes > /dev/null
+  $ hg debugupgraderepo -R up-no-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data > /dev/null
 
 Check that we can downgrade from sidedata
 -----------------------------------------
 
-  $ hg init up-side-data --config format.exp-use-side-data=yes
+  $ hg init up-side-data --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data
   $ hg debugformat -v -R up-side-data
   format-variant     repo config default
   fncache:            yes    yes     yes
@@ -100,7 +100,7 @@
   compression:        zlib   zlib    zlib (no-zstd !)
   compression:        zstd   zstd    zstd (zstd !)
   compression-level:  default default default
-  $ hg debugformat -v -R up-side-data --config format.exp-use-side-data=no
+  $ hg debugformat -v -R up-side-data --config experimental.revlogv2=no
   format-variant     repo config default
   fncache:            yes    yes     yes
   dotencode:          yes    yes     yes
@@ -115,4 +115,4 @@
   compression:        zlib   zlib    zlib (no-zstd !)
   compression:        zstd   zstd    zstd (zstd !)
   compression-level:  default default default
-  $ hg debugupgraderepo -R up-side-data --config format.exp-use-side-data=no > /dev/null
+  $ hg debugupgraderepo -R up-side-data --config experimental.revlogv2=no > /dev/null
--- a/tests/test-single-head.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-single-head.t	Mon May 17 15:05:24 2021 +0200
@@ -65,6 +65,9 @@
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit c_dD0
   created new head
+  $ hg log -r 'heads(::branch("default"))' -T '{node|short}\n'
+  286d02a6e2a2
+  9bf953aa81f6
   $ hg push -f
   pushing to $TESTTMP/single-head-server
   searching for changes
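
The added `hg log` call makes the test assert which heads the single-head
logic will consider: the revset keeps only the heads among the ancestors of
the `default` branch. The same query can be issued from Python with
`repo.revs()`; a sketch, with an invented function name:

  def heads_of_branch_ancestors(repo, branch=b"default"):
      # repo.revs() evaluates a revset and returns a smartset of revision
      # numbers; format the nodes the way {node|short} does
      revs = repo.revs(b'heads(::branch(%s))', branch)
      return [repo[r].hex()[:12] for r in revs]
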
--- a/tests/test-split.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-split.t	Mon May 17 15:05:24 2021 +0200
@@ -77,7 +77,7 @@
 
   $ hg phase --public -r 'all()'
   $ hg split .
-  abort: cannot split public changesets
+  abort: cannot split public changesets: 1df0d5c5a3ab
   (see 'hg help phases' for details)
   [10]
 
@@ -466,7 +466,8 @@
   $ cd $TESTTMP/d
 #if obsstore-off
   $ runsplit -r 1 --no-rebase
-  abort: cannot split changeset with children
+  abort: cannot split changeset, as that will orphan 3 descendants
+  (see 'hg help evolution.instability')
   [10]
 #else
   $ runsplit -r 1 --no-rebase >/dev/null
@@ -517,7 +518,8 @@
   $ eval `hg tags -T '{tag}={node}\n'`
   $ rm .hg/localtags
   $ hg split $B --config experimental.evolution=createmarkers
-  abort: cannot split changeset with children
+  abort: cannot split changeset, as that will orphan 4 descendants
+  (see 'hg help evolution.instability')
   [10]
   $ cat > $TESTTMP/messages <<EOF
   > Split B
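
The reworded aborts now count the descendants a split would orphan. A
minimal sketch of that kind of precheck, assuming obsolescence markers are
disabled (the real logic lives in mercurial.rewriteutil and differs in
detail):

  from mercurial import error
  from mercurial.i18n import _

  def check_no_new_orphans(repo, rev, action=b"split"):
      # every descendant of rev, rev itself excluded, would become an orphan
      orphans = repo.revs(b'(%d::) - %d', rev, rev)
      if orphans:
          msg = _(b"cannot %s changeset, as that will orphan %d descendants")
          hint = _(b"see 'hg help evolution.instability'")
          raise error.InputError(msg % (action, len(orphans)), hint=hint)
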
--- a/tests/test-status.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-status.t	Mon May 17 15:05:24 2021 +0200
@@ -1,3 +1,11 @@
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
   $ hg init repo1
   $ cd repo1
   $ mkdir a b a/1 b/1 b/2
@@ -681,6 +689,32 @@
   $ ln -s ../repo0/.hg
   $ hg status
 
+If the size hasn’t changed but mtime has, status needs to read the contents
+of the file to check whether it has changed
+
+  $ echo 1 > a
+  $ echo 1 > b
+  $ touch -t 200102030000 a b
+  $ hg commit -Aqm '#0'
+  $ echo 2 > a
+  $ touch -t 200102040000 a b
+  $ hg status
+  M a
+
+Asking specifically for the status of a deleted/removed file
+
+  $ rm a
+  $ rm b
+  $ hg status a
+  ! a
+  $ hg rm a
+  $ hg rm b
+  $ hg status a
+  R a
+  $ hg commit -qm '#1'
+  $ hg status a
+  a: $ENOENT$
+
 Check using include flag with pattern when status does not need to traverse
 the working directory (issue6483)
 
@@ -691,3 +725,147 @@
   $ hg add a.py b.rs
   $ hg st -aI "*.py"
   A a.py
+
+Also check exclude pattern
+
+  $ hg st -aX "*.rs"
+  A a.py
+
+issue6335
+When a directory containing a tracked file is replaced by a symlink, as of
+5.8 `hg st` only gives the correct answer about clean (or deleted) files
+when unknowns are also listed.
+The tree-based dirstate and its status algorithm fix this:
+
+#if symlink no-dirstate-v1
+
+  $ cd ..
+  $ hg init issue6335
+  $ cd issue6335
+  $ mkdir foo
+  $ touch foo/a
+  $ hg ci -Ama
+  adding foo/a
+  $ mv foo bar
+  $ ln -s bar foo
+  $ hg status
+  ! foo/a
+  ? bar/a
+  ? foo
+
+  $ hg status -c  # incorrect output with `dirstate-v1`
+  $ hg status -cu
+  ? bar/a
+  ? foo
+  $ hg status -d  # incorrect output with `dirstate-v1`
+  ! foo/a
+  $ hg status -du
+  ! foo/a
+  ? bar/a
+  ? foo
+
+#endif
+
+
+Create a repo with files in each possible status
+
+  $ cd ..
+  $ hg init repo7
+  $ cd repo7
+  $ mkdir subdir
+  $ touch clean modified deleted removed
+  $ touch subdir/clean subdir/modified subdir/deleted subdir/removed
+  $ echo ignored > .hgignore
+  $ hg ci -Aqm '#0'
+  $ echo 1 > modified
+  $ echo 1 > subdir/modified
+  $ rm deleted
+  $ rm subdir/deleted
+  $ hg rm removed
+  $ hg rm subdir/removed
+  $ touch unknown ignored
+  $ touch subdir/unknown subdir/ignored
+
+Check the output
+
+  $ hg status
+  M modified
+  M subdir/modified
+  R removed
+  R subdir/removed
+  ! deleted
+  ! subdir/deleted
+  ? subdir/unknown
+  ? unknown
+
+  $ hg status -mard
+  M modified
+  M subdir/modified
+  R removed
+  R subdir/removed
+  ! deleted
+  ! subdir/deleted
+
+  $ hg status -A
+  M modified
+  M subdir/modified
+  R removed
+  R subdir/removed
+  ! deleted
+  ! subdir/deleted
+  ? subdir/unknown
+  ? unknown
+  I ignored
+  I subdir/ignored
+  C .hgignore
+  C clean
+  C subdir/clean
+
+Note: `hg status some-name` creates a patternmatcher, which the Rust
+implementation of status does not support yet; includematcher is
+supported, which is why --include is used below
+
+Remove a directory that contains tracked files
+
+  $ rm -r subdir
+  $ hg status --include subdir
+  R subdir/removed
+  ! subdir/clean
+  ! subdir/deleted
+  ! subdir/modified
+
+… and replace it with a file
+
+  $ touch subdir
+  $ hg status --include subdir
+  R subdir/removed
+  ! subdir/clean
+  ! subdir/deleted
+  ! subdir/modified
+  ? subdir
+
+Replace a deleted or removed file with a directory
+
+  $ mkdir deleted removed
+  $ touch deleted/1 removed/1
+  $ hg status --include deleted --include removed
+  R removed
+  ! deleted
+  ? deleted/1
+  ? removed/1
+  $ hg add removed/1
+  $ hg status --include deleted --include removed
+  A removed/1
+  R removed
+  ! deleted
+  ? deleted/1
+
+Deeply nested files in an ignored directory are still listed on request
+
+  $ echo ignored-dir >> .hgignore
+  $ mkdir ignored-dir
+  $ mkdir ignored-dir/subdir
+  $ touch ignored-dir/subdir/1
+  $ hg status --ignored
+  I ignored
+  I ignored-dir/subdir/1
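
The size/mtime block added near the top of this file pins down the core
dirstate heuristic: when the recorded size still matches but the recorded
mtime does not, metadata alone cannot distinguish clean from modified, so
status has to read the file. A hypothetical distillation, not the actual
dirstate code:

  import os

  def needs_content_check(recorded_size, recorded_mtime, path):
      # different size: modified for sure, no read needed;
      # same size but different mtime: maybe modified, read and compare
      st = os.lstat(path)
      if st.st_size != recorded_size:
          return False
      return int(st.st_mtime) != recorded_mtime
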
--- a/tests/test-symlinks.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-symlinks.t	Mon May 17 15:05:24 2021 +0200
@@ -1,5 +1,13 @@
 #require symlink
 
+#testcases dirstate-v1 dirstate-v1-tree
+
+#if dirstate-v1-tree
+#require rust
+  $ echo '[experimental]' >> $HGRCPATH
+  $ echo 'dirstate-tree.in-memory=1' >> $HGRCPATH
+#endif
+
 == tests added in 0.7 ==
 
   $ hg init test-symlinks-0.7; cd test-symlinks-0.7;
--- a/tests/test-unamend.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-unamend.t	Mon May 17 15:05:24 2021 +0200
@@ -6,6 +6,7 @@
   > glog = log -G -T '{rev}:{node|short}  {desc}'
   > [experimental]
   > evolution = createmarkers, allowunstable
+  > evolution.allowdivergence = true
   > [extensions]
   > rebase =
   > amend =
@@ -283,7 +284,8 @@
   
 
   $ hg --config experimental.evolution=createmarkers unamend
-  abort: cannot unamend changeset with children
+  abort: cannot unamend changeset, as that will orphan 3 descendants
+  (see 'hg help evolution.instability')
   [10]
 
   $ hg unamend
@@ -296,7 +298,7 @@
   $ hg phase -r . -p
   1 new phase-divergent changesets
   $ hg unamend
-  abort: cannot unamend public changesets
+  abort: cannot unamend public changesets: 03ddd6fc5af1
   (see 'hg help phases' for details)
   [10]
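
Like split, unamend now goes through prechecks whose messages name the
problem precisely, down to the node of the offending public changeset. A
hedged sketch of the phase check (the real one, in mercurial.rewriteutil,
handles several revisions at once):

  from mercurial import error, phases
  from mercurial.i18n import _
  from mercurial.node import short

  def check_not_public(repo, rev, action=b"unamend"):
      ctx = repo[rev]
      if ctx.phase() == phases.public:
          msg = _(b"cannot %s public changesets: %s")
          hint = _(b"see 'hg help phases' for details")
          raise error.InputError(msg % (action, short(ctx.node())), hint=hint)
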
 
--- a/tests/test-uncommit.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-uncommit.t	Mon May 17 15:05:24 2021 +0200
@@ -51,7 +51,7 @@
 Uncommit with no commits should fail
 
   $ hg uncommit
-  abort: cannot uncommit null changeset
+  abort: cannot uncommit the null revision
   (no changeset checked out)
   [10]
 
@@ -410,7 +410,7 @@
   [20]
 
   $ hg uncommit --config experimental.uncommitondirtywdir=True
-  abort: cannot uncommit while merging
+  abort: cannot uncommit changesets while merging
   [20]
 
   $ hg status
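
The uncommit wording was normalized the same way ("cannot uncommit
changesets while merging", exit code 20 for a state error). A sketch of the
underlying check, assuming the usual two-parent test for an in-progress
merge:

  from mercurial import error
  from mercurial.i18n import _

  def check_not_merging(repo):
      # a working directory with two parents is in the middle of a merge
      if len(repo[None].parents()) > 1:
          raise error.StateError(_(b"cannot uncommit changesets while merging"))
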
--- a/tests/test-upgrade-repo.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-upgrade-repo.t	Mon May 17 15:05:24 2021 +0200
@@ -1448,12 +1448,13 @@
 
 #endif
 
-Check upgrading to a side-data revlog
--------------------------------------
+Check upgrading to a revlog format supporting sidedata
+------------------------------------------------------
 
 upgrade
 
-  $ hg --config format.exp-use-side-data=yes debugupgraderepo --run  --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
+  $ hg debugsidedata -c 0
+  $ hg --config experimental.revlogv2=enable-unstable-format-and-corrupt-my-data debugupgraderepo --run  --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
   upgrade will perform the following actions:
   
   requirements
@@ -1461,8 +1462,8 @@
      preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
      preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
      removed: revlogv1
-     added: exp-revlogv2.2, exp-sidedata-flag (zstd !)
-     added: exp-revlogv2.2, exp-sidedata-flag, sparserevlog (no-zstd !)
+     added: exp-revlogv2.2 (zstd !)
+     added: exp-revlogv2.2, sparserevlog (no-zstd !)
   
   processed revlogs:
     - all-filelogs
@@ -1487,7 +1488,6 @@
   $ cat .hg/requires
   dotencode
   exp-revlogv2.2
-  exp-sidedata-flag
   fncache
   generaldelta
   persistent-nodemap (rust !)
@@ -1501,14 +1501,14 @@
 
 downgrade
 
-  $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup --quiet
+  $ hg debugupgraderepo --config experimental.revlogv2=no --run --no-backup --quiet
   upgrade will perform the following actions:
   
   requirements
      preserved: dotencode, fncache, generaldelta, sparserevlog, store (no-zstd !)
      preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
      preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
-     removed: exp-revlogv2.2, exp-sidedata-flag
+     removed: exp-revlogv2.2
      added: revlogv1
   
   processed revlogs:
@@ -1545,8 +1545,8 @@
 upgrade from hgrc
 
   $ cat >> .hg/hgrc << EOF
-  > [format]
-  > exp-use-side-data=yes
+  > [experimental]
+  > revlogv2=enable-unstable-format-and-corrupt-my-data
   > EOF
   $ hg debugupgraderepo --run --no-backup --quiet
   upgrade will perform the following actions:
@@ -1556,7 +1556,7 @@
      preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, sparserevlog, store (zstd no-rust !)
      preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, sparserevlog, store (rust !)
      removed: revlogv1
-     added: exp-revlogv2.2, exp-sidedata-flag
+     added: exp-revlogv2.2
   
   processed revlogs:
     - all-filelogs
@@ -1581,7 +1581,6 @@
   $ cat .hg/requires
   dotencode
   exp-revlogv2.2
-  exp-sidedata-flag
   fncache
   generaldelta
   persistent-nodemap (rust !)
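
The "preserved / removed / added" lines in the upgrade output are plain set
arithmetic over the requirements before and after the operation; a minimal
sketch:

  def requirement_changes(old, new):
      # old and new are sets of requirement strings as read from .hg/requires
      return {
          b'preserved': sorted(old & new),
          b'removed': sorted(old - new),
          b'added': sorted(new - old),
      }

For the downgrade above, old contains exp-revlogv2.2 and new contains
revlogv1, which yields exactly the removed/added lines shown.
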
--- a/tests/test-verify.t	Fri May 07 10:39:58 2021 +0200
+++ b/tests/test-verify.t	Mon May 17 15:05:24 2021 +0200
@@ -297,7 +297,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   a@1: broken revlog! (index data/a.i is corrupted)
+   a@1: broken revlog! (index data/a is corrupted)
   warning: orphan data file 'data/a.i'
   checked 2 changesets with 0 changes to 1 files
   1 warnings encountered!
@@ -351,7 +351,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   base64@0: unpacking 794cee7777cb: integrity check failed on data/base64.i:0
+   base64@0: unpacking 794cee7777cb: integrity check failed on data/base64:0
   checked 1 changesets with 1 changes to 1 files
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)
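
The two expected-output tweaks reflect that verify now reports a display
name for the revlog rather than the .i index filename; the integrity check
itself is unchanged: the node stored in the index must equal the hash
recomputed from the text and the parents. A hedged sketch, where
display_name stands in for however the revlog labels itself:

  def check_integrity(rl, rev, display_name):
      # recompute the SHA-1 over (parents, text) and compare with the index
      node = rl.node(rev)
      p1, p2 = rl.parents(node)
      text = rl.revision(rev)
      if rl.hash(text, p1, p2) != node:
          raise RuntimeError('integrity check failed on %s:%d'
                             % (display_name, rev))
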
--- a/tests/testlib/ext-sidedata-2.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/testlib/ext-sidedata-2.py	Mon May 17 15:05:24 2021 +0200
@@ -14,6 +14,9 @@
 import struct
 
 from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
 
 
 def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -21,7 +24,7 @@
     if text is None:
         text = revlog.revision(rev)
     sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
-    return sidedata
+    return sidedata, NO_FLAGS
 
 
 def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -30,21 +33,23 @@
         text = revlog.revision(rev)
     sha256 = hashlib.sha256(text).digest()
     sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
-    return sidedata
+    return sidedata, NO_FLAGS
 
 
 def reposetup(ui, repo):
     # Sidedata keys happen to be the same as the categories, easier for testing.
-    for kind in (b'changelog', b'manifest', b'filelog'):
+    for kind in constants.ALL_KINDS:
         repo.register_sidedata_computer(
             kind,
             sidedatamod.SD_TEST1,
             (sidedatamod.SD_TEST1,),
             compute_sidedata_1,
+            0,
         )
         repo.register_sidedata_computer(
             kind,
             sidedatamod.SD_TEST2,
             (sidedatamod.SD_TEST2,),
             compute_sidedata_2,
+            0,
         )
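
Two protocol changes are visible in this hunk: a computer now returns a
(sidedata, flags) pair instead of bare sidedata (the pair presumably being
revlog flags to set and to clear), and register_sidedata_computer grew a
trailing argument for the flags a computer requires. A minimal registration
following the same shape as this extension:

  import struct

  from mercurial.revlogutils import constants
  from mercurial.revlogutils import sidedata as sidedatamod

  NO_FLAGS = (0, 0)

  def compute_length(repo, revlog, rev, sidedata, text=None):
      # store the revision length under SD_TEST1; set and clear no flags
      if text is None:
          text = revlog.revision(rev)
      sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
      return sidedata, NO_FLAGS

  def reposetup(ui, repo):
      for kind in constants.ALL_KINDS:
          repo.register_sidedata_computer(
              kind,
              sidedatamod.SD_TEST1,     # category
              (sidedatamod.SD_TEST1,),  # sidedata keys produced
              compute_length,
              0,                        # revlog flags this computer needs
          )
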
--- a/tests/testlib/ext-sidedata-3.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/testlib/ext-sidedata-3.py	Mon May 17 15:05:24 2021 +0200
@@ -20,6 +20,9 @@
 )
 
 from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
 
 
 def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -27,7 +30,7 @@
     if text is None:
         text = revlog.revision(rev)
     sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
-    return sidedata
+    return sidedata, NO_FLAGS
 
 
 def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -36,7 +39,7 @@
         text = revlog.revision(rev)
     sha256 = hashlib.sha256(text).digest()
     sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
-    return sidedata
+    return sidedata, NO_FLAGS
 
 
 def compute_sidedata_3(repo, revlog, rev, sidedata, text=None):
@@ -45,7 +48,7 @@
         text = revlog.revision(rev)
     sha384 = hashlib.sha384(text).digest()
     sidedata[sidedatamod.SD_TEST3] = struct.pack('>48s', sha384)
-    return sidedata
+    return sidedata, NO_FLAGS
 
 
 def wrapaddrevision(
@@ -54,8 +57,8 @@
     if kwargs.get('sidedata') is None:
         kwargs['sidedata'] = {}
     sd = kwargs['sidedata']
-    sd = compute_sidedata_1(None, self, None, sd, text=text)
-    kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)
+    sd, flags = compute_sidedata_1(None, self, None, sd, text=text)
+    kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)[0]
     return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
 
 
@@ -65,24 +68,27 @@
 
 def reposetup(ui, repo):
     # Sidedata keys happen to be the same as the categories, easier for testing.
-    for kind in (b'changelog', b'manifest', b'filelog'):
+    for kind in constants.ALL_KINDS:
         repo.register_sidedata_computer(
             kind,
             sidedatamod.SD_TEST1,
             (sidedatamod.SD_TEST1,),
             compute_sidedata_1,
+            0,
         )
         repo.register_sidedata_computer(
             kind,
             sidedatamod.SD_TEST2,
             (sidedatamod.SD_TEST2,),
             compute_sidedata_2,
+            0,
         )
         repo.register_sidedata_computer(
             kind,
             sidedatamod.SD_TEST3,
             (sidedatamod.SD_TEST3,),
             compute_sidedata_3,
+            0,
         )
     repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
     repo.register_wanted_sidedata(sidedatamod.SD_TEST2)
--- a/tests/testlib/ext-sidedata-5.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/testlib/ext-sidedata-5.py	Mon May 17 15:05:24 2021 +0200
@@ -21,6 +21,9 @@
 
 
 from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
 
 
 def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -28,7 +31,7 @@
     if text is None:
         text = revlog.revision(rev)
     sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
-    return sidedata
+    return sidedata, NO_FLAGS
 
 
 def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -37,23 +40,25 @@
         text = revlog.revision(rev)
     sha256 = hashlib.sha256(text).digest()
     sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
-    return sidedata
+    return sidedata, NO_FLAGS
 
 
 def reposetup(ui, repo):
     # Sidedata keys happen to be the same as the categories, easier for testing.
-    for kind in (b'changelog', b'manifest', b'filelog'):
+    for kind in constants.ALL_KINDS:
         repo.register_sidedata_computer(
             kind,
             sidedatamod.SD_TEST1,
             (sidedatamod.SD_TEST1,),
             compute_sidedata_1,
+            0,
         )
         repo.register_sidedata_computer(
             kind,
             sidedatamod.SD_TEST2,
             (sidedatamod.SD_TEST2,),
             compute_sidedata_2,
+            0,
         )
 
     # We don't register sidedata computers because we don't care within these
--- a/tests/testlib/ext-sidedata.py	Fri May 07 10:39:58 2021 +0200
+++ b/tests/testlib/ext-sidedata.py	Mon May 17 15:05:24 2021 +0200
@@ -10,10 +10,7 @@
 import hashlib
 import struct
 
-from mercurial.node import (
-    nullid,
-    nullrev,
-)
+from mercurial.node import nullrev
 from mercurial import (
     extensions,
     requirements,
@@ -22,6 +19,7 @@
 
 from mercurial.upgrade_utils import engine as upgrade_engine
 
+from mercurial.revlogutils import constants
 from mercurial.revlogutils import sidedata
 
 
@@ -44,9 +42,9 @@
     text, sd = orig(self, nodeorrev, *args, **kwargs)
     if getattr(self, 'sidedatanocheck', False):
         return text, sd
-    if self.version & 0xFFFF != 2:
+    if not self.hassidedata:
         return text, sd
-    if nodeorrev != nullrev and nodeorrev != nullid:
+    if nodeorrev != nullrev and nodeorrev != self.nullid:
         cat1 = sd.get(sidedata.SD_TEST1)
         if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
             raise RuntimeError('text size mismatch')
@@ -57,13 +55,15 @@
     return text, sd
 
 
-def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
-    sidedatacompanion = orig(srcrepo, dstrepo)
+def wrapget_sidedata_helpers(orig, srcrepo, dstrepo):
+    repo, computers, removers = orig(srcrepo, dstrepo)
+    assert not computers and not removers  # deal with composition later
     addedreqs = dstrepo.requirements - srcrepo.requirements
-    if requirements.SIDEDATA_REQUIREMENT in addedreqs:
-        assert sidedatacompanion is None  # deal with composition later
+
+    if requirements.REVLOGV2_REQUIREMENT in addedreqs:
 
-        def sidedatacompanion(revlog, rev):
+        def computer(repo, revlog, rev, old_sidedata):
+            assert not old_sidedata  # not supported yet
             update = {}
             revlog.sidedatanocheck = True
             try:
@@ -76,16 +76,25 @@
             # and sha2 hashes
             sha256 = hashlib.sha256(text).digest()
             update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
-            return False, (), update, 0, 0
+            return update, (0, 0)
 
-    return sidedatacompanion
+        srcrepo.register_sidedata_computer(
+            constants.KIND_CHANGELOG,
+            b"whatever",
+            (sidedata.SD_TEST1, sidedata.SD_TEST2),
+            computer,
+            0,
+        )
+        dstrepo.register_wanted_sidedata(b"whatever")
+
+    return sidedata.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
 
 
 def extsetup(ui):
     extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
     extensions.wrapfunction(revlog.revlog, '_revisiondata', wrap_revisiondata)
     extensions.wrapfunction(
-        upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
+        upgrade_engine, 'get_sidedata_helpers', wrapget_sidedata_helpers
     )
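
All of these testlib extensions lean on the same monkeypatching primitive:
extensions.wrapfunction(container, name, wrapper) replaces container.name
and hands the original callable to the wrapper as its first argument. A
self-contained toy, with an invented container:

  from mercurial import extensions

  class toyapi(object):
      @staticmethod
      def greet(name):
          return b'hello ' + name

  def loud_greet(orig, name):
      # the wrapper receives the original function first and decides
      # whether and how to call it
      return orig(name).upper()

  extensions.wrapfunction(toyapi, 'greet', loud_greet)
  assert toyapi.greet(b'sidedata') == b'HELLO SIDEDATA'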