branching: merge stable into default
author Raphaël Gomès <rgomes@octobus.net>
Mon, 03 May 2021 18:55:19 +0200
changeset 47057 7431f5ab0d2a
parent 47043 12450fbea288
parent 47056 067f2c53fb24 (current diff)
child 47060 fde5bb5d1acf
files
hgext/git/dirstate.py
hgext/git/gitlog.py
hgext/git/index.py
hgext/phabricator.py
mercurial/hg.py
--- a/contrib/chg/chg.c	Sat May 01 00:28:39 2021 -0400
+++ b/contrib/chg/chg.c	Mon May 03 18:55:19 2021 +0200
@@ -240,13 +240,8 @@
 	const char *hgcmd = gethgcmd();
 
 	const char *baseargv[] = {
-	    hgcmd,
-	    "serve",
-	    "--cmdserver",
-	    "chgunix",
-	    "--address",
-	    opts->initsockname,
-	    "--daemon-postexec",
+	    hgcmd,     "serve",     "--no-profile",     "--cmdserver",
+	    "chgunix", "--address", opts->initsockname, "--daemon-postexec",
 	    "chdir:/",
 	};
 	size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
--- a/contrib/perf.py	Sat May 01 00:28:39 2021 -0400
+++ b/contrib/perf.py	Mon May 03 18:55:19 2021 +0200
@@ -2598,11 +2598,14 @@
     header = struct.unpack(b'>I', data[0:4])[0]
     version = header & 0xFFFF
     if version == 1:
-        revlogio = revlog.revlogio()
         inline = header & (1 << 16)
     else:
         raise error.Abort(b'unsupported revlog version: %d' % version)
 
+    parse_index_v1 = getattr(revlog, 'parse_index_v1', None)
+    if parse_index_v1 is None:
+        parse_index_v1 = revlog.revlogio().parseindex
+
     rllen = len(rl)
 
     node0 = rl.node(0)
@@ -2624,26 +2627,24 @@
             fh.read()
 
     def parseindex():
-        revlogio.parseindex(data, inline)
+        parse_index_v1(data, inline)
 
     def getentry(revornode):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
         index[revornode]
 
     def getentries(revs, count=1):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
 
         for i in range(count):
             for rev in revs:
                 index[rev]
 
     def resolvenode(node):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
         rev = getattr(index, 'rev', None)
         if rev is None:
-            nodemap = getattr(
-                revlogio.parseindex(data, inline)[0], 'nodemap', None
-            )
+            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
             # This only works for the C code.
             if nodemap is None:
                 return
@@ -2655,12 +2656,10 @@
             pass
 
     def resolvenodes(nodes, count=1):
-        index = revlogio.parseindex(data, inline)[0]
+        index = parse_index_v1(data, inline)[0]
         rev = getattr(index, 'rev', None)
         if rev is None:
-            nodemap = getattr(
-                revlogio.parseindex(data, inline)[0], 'nodemap', None
-            )
+            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
             # This only works for the C code.
             if nodemap is None:
                 return
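
The perf.py hunks above switch the benchmark to a getattr-based compatibility probe: prefer the module-level parse_index_v1 when the running Mercurial provides it, and fall back to the legacy revlogio class otherwise. A minimal standalone sketch of that probe, assuming `data` and `inline` have been read from the revlog header as in the hunk:

    from mercurial import revlog

    # Probe for the newer module-level parser; fall back to the legacy
    # revlogio().parseindex bound method on older Mercurial versions.
    parse_index_v1 = getattr(revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = revlog.revlogio().parseindex

    # `data` and `inline` come from the revlog header, as in the hunk above.
    index = parse_index_v1(data, inline)[0]
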
--- a/hgext/absorb.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/absorb.py	Mon May 03 18:55:19 2021 +0200
@@ -38,7 +38,6 @@
 from mercurial.i18n import _
 from mercurial.node import (
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -109,7 +108,7 @@
         return b''
 
     def node(self):
-        return nullid
+        return self._repo.nullid
 
 
 def uniq(lst):
@@ -927,7 +926,7 @@
         the commit is a clone from ctx, with a (optionally) different p1, and
         different file contents replaced by memworkingcopy.
         """
-        parents = p1 and (p1, nullid)
+        parents = p1 and (p1, self.repo.nullid)
         extra = ctx.extra()
         if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
             extra[b'absorb_source'] = ctx.hex()
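
The absorb.py hunks show the pattern repeated through most of the remaining files in this merge: the module-level nullid/nullhex constants from mercurial.node give way to per-repository attributes or to sha1nodeconstants. A rough before/after sketch of the idiom, assuming a localrepository object `repo` is in scope:

    # before: hash-specific module-level constant
    from mercurial.node import nullid
    if p2 != nullid:
        ...

    # after: resolved through the repository, or through sha1nodeconstants
    # where no repo object is at hand
    from mercurial.node import sha1nodeconstants
    if p2 != repo.nullid:
        ...
    if p2 != sha1nodeconstants.nullid:
        ...
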
--- a/hgext/convert/git.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/convert/git.py	Mon May 03 18:55:19 2021 +0200
@@ -9,7 +9,7 @@
 import os
 
 from mercurial.i18n import _
-from mercurial.node import nullhex
+from mercurial.node import sha1nodeconstants
 from mercurial import (
     config,
     error,
@@ -192,7 +192,7 @@
         return heads
 
     def catfile(self, rev, ftype):
-        if rev == nullhex:
+        if rev == sha1nodeconstants.nullhex:
             raise IOError
         self.catfilepipe[0].write(rev + b'\n')
         self.catfilepipe[0].flush()
@@ -214,7 +214,7 @@
         return data
 
     def getfile(self, name, rev):
-        if rev == nullhex:
+        if rev == sha1nodeconstants.nullhex:
             return None, None
         if name == b'.hgsub':
             data = b'\n'.join([m.hgsub() for m in self.submoditer()])
@@ -228,7 +228,7 @@
         return data, mode
 
     def submoditer(self):
-        null = nullhex
+        null = sha1nodeconstants.nullhex
         for m in sorted(self.submodules, key=lambda p: p.path):
             if m.node != null:
                 yield m
@@ -317,7 +317,7 @@
                 subexists[0] = True
                 if entry[4] == b'D' or renamesource:
                     subdeleted[0] = True
-                    changes.append((b'.hgsub', nullhex))
+                    changes.append((b'.hgsub', sha1nodeconstants.nullhex))
                 else:
                     changes.append((b'.hgsub', b''))
             elif entry[1] == b'160000' or entry[0] == b':160000':
@@ -325,7 +325,7 @@
                     subexists[0] = True
             else:
                 if renamesource:
-                    h = nullhex
+                    h = sha1nodeconstants.nullhex
                 self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b""
                 changes.append((f, h))
 
@@ -362,7 +362,7 @@
 
         if subexists[0]:
             if subdeleted[0]:
-                changes.append((b'.hgsubstate', nullhex))
+                changes.append((b'.hgsubstate', sha1nodeconstants.nullhex))
             else:
                 self.retrievegitmodules(version)
                 changes.append((b'.hgsubstate', b''))
--- a/hgext/convert/hg.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/convert/hg.py	Mon May 03 18:55:19 2021 +0200
@@ -27,8 +27,7 @@
 from mercurial.node import (
     bin,
     hex,
-    nullhex,
-    nullid,
+    sha1nodeconstants,
 )
 from mercurial import (
     bookmarks,
@@ -160,7 +159,7 @@
                 continue
             revid = revmap.get(source.lookuprev(s[0]))
             if not revid:
-                if s[0] == nullhex:
+                if s[0] == sha1nodeconstants.nullhex:
                     revid = s[0]
                 else:
                     # missing, but keep for hash stability
@@ -179,7 +178,7 @@
 
             revid = s[0]
             subpath = s[1]
-            if revid != nullhex:
+            if revid != sha1nodeconstants.nullhex:
                 revmap = self.subrevmaps.get(subpath)
                 if revmap is None:
                     revmap = mapfile(
@@ -304,9 +303,9 @@
             parent = parents[0]
 
         if len(parents) < 2:
-            parents.append(nullid)
+            parents.append(self.repo.nullid)
         if len(parents) < 2:
-            parents.append(nullid)
+            parents.append(self.repo.nullid)
         p2 = parents.pop(0)
 
         text = commit.desc
@@ -356,7 +355,7 @@
             p2 = parents.pop(0)
             p1ctx = self.repo[p1]
             p2ctx = None
-            if p2 != nullid:
+            if p2 != self.repo.nullid:
                 p2ctx = self.repo[p2]
             fileset = set(files)
             if full:
@@ -421,7 +420,7 @@
 
     def puttags(self, tags):
         tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
-        tagparent = tagparent or nullid
+        tagparent = tagparent or self.repo.nullid
 
         oldlines = set()
         for branch, heads in pycompat.iteritems(self.repo.branchmap()):
--- a/hgext/git/dirstate.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/git/dirstate.py	Mon May 03 18:55:19 2021 +0200
@@ -4,7 +4,7 @@
 import errno
 import os
 
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
 from mercurial import (
     error,
     extensions,
@@ -81,14 +81,16 @@
         except pygit2.GitError:
             # Typically happens when peeling HEAD fails, as in an
             # empty repository.
-            return nullid
+            return sha1nodeconstants.nullid
 
     def p2(self):
         # TODO: MERGE_HEAD? something like that, right?
-        return nullid
+        return sha1nodeconstants.nullid
 
-    def setparents(self, p1, p2=nullid):
-        assert p2 == nullid, b'TODO merging support'
+    def setparents(self, p1, p2=None):
+        if p2 is None:
+            p2 = sha1nodeconstants.nullid
+        assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
         self.git.head.set_target(gitutil.togitnode(p1))
 
     @util.propertycache
@@ -102,7 +104,7 @@
 
     def parents(self):
         # TODO how on earth do we find p2 if a merge is in flight?
-        return self.p1(), nullid
+        return self.p1(), sha1nodeconstants.nullid
 
     def __iter__(self):
         return (pycompat.fsencode(f.path) for f in self.git.index)
--- a/hgext/git/gitlog.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/git/gitlog.py	Mon May 03 18:55:19 2021 +0200
@@ -5,11 +5,8 @@
 from mercurial.node import (
     bin,
     hex,
-    nullhex,
-    nullid,
     nullrev,
     sha1nodeconstants,
-    wdirhex,
 )
 from mercurial import (
     ancestor,
@@ -47,7 +44,7 @@
         )
 
     def rev(self, n):
-        if n == nullid:
+        if n == sha1nodeconstants.nullid:
             return -1
         t = self._db.execute(
             'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
@@ -58,7 +55,7 @@
 
     def node(self, r):
         if r == nullrev:
-            return nullid
+            return sha1nodeconstants.nullid
         t = self._db.execute(
             'SELECT node FROM changelog WHERE rev = ?', (r,)
         ).fetchone()
@@ -135,7 +132,7 @@
             bin(v[0]): v[1]
             for v in self._db.execute('SELECT node, rev FROM changelog')
         }
-        r[nullid] = nullrev
+        r[sha1nodeconstants.nullid] = nullrev
         return r
 
     def tip(self):
@@ -144,7 +141,7 @@
         ).fetchone()
         if t:
             return bin(t[0])
-        return nullid
+        return sha1nodeconstants.nullid
 
     def revs(self, start=0, stop=None):
         if stop is None:
@@ -167,7 +164,7 @@
         return -1
 
     def _partialmatch(self, id):
-        if wdirhex.startswith(id):
+        if sha1nodeconstants.wdirhex.startswith(id):
             raise error.WdirUnsupported
         candidates = [
             bin(x[0])
@@ -176,8 +173,8 @@
                 (pycompat.sysstr(id + b'%'),),
             )
         ]
-        if nullhex.startswith(id):
-            candidates.append(nullid)
+        if sha1nodeconstants.nullhex.startswith(id):
+            candidates.append(sha1nodeconstants.nullid)
         if len(candidates) > 1:
             raise error.AmbiguousPrefixLookupError(
                 id, b'00changelog.i', _(b'ambiguous identifier')
@@ -223,8 +220,10 @@
             n = nodeorrev
         extra = {b'branch': b'default'}
         # handle looking up nullid
-        if n == nullid:
-            return hgchangelog._changelogrevision(extra=extra, manifest=nullid)
+        if n == sha1nodeconstants.nullid:
+            return hgchangelog._changelogrevision(
+                extra=extra, manifest=sha1nodeconstants.nullid
+            )
         hn = gitutil.togitnode(n)
         # We've got a real commit!
         files = [
@@ -301,7 +300,7 @@
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
         if common is None:
-            common = [nullid]
+            common = [sha1nodeconstants.nullid]
         if heads is None:
             heads = self.heads()
 
@@ -400,9 +399,9 @@
     ):
         parents = []
         hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
-        if p1 != nullid:
+        if p1 != sha1nodeconstants.nullid:
             parents.append(hp1)
-        if p2 and p2 != nullid:
+        if p2 and p2 != sha1nodeconstants.nullid:
             parents.append(hp2)
         assert date is not None
         timestamp, tz = date
@@ -435,7 +434,7 @@
         return self.get(b'', node)
 
     def get(self, relpath, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             # TODO: this should almost certainly be a memgittreemanifestctx
             return manifest.memtreemanifestctx(self, relpath)
         commit = self.gitrepo[gitutil.togitnode(node)]
@@ -454,9 +453,10 @@
         super(filelog, self).__init__(gr, db)
         assert isinstance(path, bytes)
         self.path = path
+        self.nullid = sha1nodeconstants.nullid
 
     def read(self, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             return b''
         return self.gitrepo[gitutil.togitnode(node)].data
 
--- a/hgext/git/gitutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/git/gitutil.py	Mon May 03 18:55:19 2021 +0200
@@ -1,7 +1,7 @@
 """utilities to assist in working with pygit2"""
 from __future__ import absolute_import
 
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex, sha1nodeconstants
 
 from mercurial import pycompat
 
@@ -50,4 +50,4 @@
     return bin(n)
 
 
-nullgit = togitnode(nullid)
+nullgit = togitnode(sha1nodeconstants.nullid)
--- a/hgext/git/index.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/git/index.py	Mon May 03 18:55:19 2021 +0200
@@ -5,9 +5,7 @@
 import sqlite3
 
 from mercurial.i18n import _
-from mercurial.node import (
-    nullid,
-)
+from mercurial.node import sha1nodeconstants
 
 from mercurial import (
     encoding,
@@ -317,7 +315,9 @@
                 )
             new_files = (p.delta.new_file for p in patchgen)
             files = {
-                nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
+                nf.path: nf.id.hex
+                for nf in new_files
+                if nf.id.raw != sha1nodeconstants.nullid
             }
             for p, n in files.items():
                 # We intentionally set NULLs for any file parentage
--- a/hgext/gpg.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/gpg.py	Mon May 03 18:55:19 2021 +0200
@@ -14,7 +14,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -314,7 +313,9 @@
     if revs:
         nodes = [repo.lookup(n) for n in revs]
     else:
-        nodes = [node for node in repo.dirstate.parents() if node != nullid]
+        nodes = [
+            node for node in repo.dirstate.parents() if node != repo.nullid
+        ]
         if len(nodes) > 1:
             raise error.Abort(
                 _(b'uncommitted merge - please provide a specific revision')
--- a/hgext/hgk.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/hgk.py	Mon May 03 18:55:19 2021 +0200
@@ -40,7 +40,6 @@
 
 from mercurial.i18n import _
 from mercurial.node import (
-    nullid,
     nullrev,
     short,
 )
@@ -95,7 +94,7 @@
         mmap2 = repo[node2].manifest()
         m = scmutil.match(repo[node1], files)
         st = repo.status(node1, node2, m)
-        empty = short(nullid)
+        empty = short(repo.nullid)
 
         for f in st.modified:
             # TODO get file permissions
@@ -317,9 +316,9 @@
             parentstr = b""
             if parents:
                 pp = repo.changelog.parents(n)
-                if pp[0] != nullid:
+                if pp[0] != repo.nullid:
                     parentstr += b" " + short(pp[0])
-                if pp[1] != nullid:
+                if pp[1] != repo.nullid:
                     parentstr += b" " + short(pp[1])
             if not full:
                 ui.write(b"%s%s\n" % (short(n), parentstr))
--- a/hgext/journal.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/journal.py	Mon May 03 18:55:19 2021 +0200
@@ -22,7 +22,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
 )
 
 from mercurial import (
@@ -117,8 +116,8 @@
     new = list(new)
     if util.safehasattr(dirstate, 'journalstorage'):
         # only record two hashes if there was a merge
-        oldhashes = old[:1] if old[1] == nullid else old
-        newhashes = new[:1] if new[1] == nullid else new
+        oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
+        newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
         dirstate.journalstorage.record(
             wdirparenttype, b'.', oldhashes, newhashes
         )
@@ -131,7 +130,7 @@
     if util.safehasattr(repo, 'journal'):
         oldmarks = bookmarks.bmstore(repo)
         for mark, value in pycompat.iteritems(store):
-            oldvalue = oldmarks.get(mark, nullid)
+            oldvalue = oldmarks.get(mark, repo.nullid)
             if value != oldvalue:
                 repo.journal.record(bookmarktype, mark, oldvalue, value)
     return orig(store, fp)
--- a/hgext/largefiles/basestore.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/largefiles/basestore.py	Mon May 03 18:55:19 2021 +0200
@@ -11,7 +11,8 @@
 
 from mercurial.i18n import _
 
-from mercurial import node, util
+from mercurial.node import short
+from mercurial import util
 from mercurial.utils import (
     urlutil,
 )
@@ -137,7 +138,7 @@
         filestocheck = []  # list of (cset, filename, expectedhash)
         for rev in revs:
             cctx = self.repo[rev]
-            cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node()))
+            cset = b"%d:%s" % (cctx.rev(), short(cctx.node()))
 
             for standin in cctx:
                 filename = lfutil.splitstandin(standin)
--- a/hgext/largefiles/lfcommands.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/largefiles/lfcommands.py	Mon May 03 18:55:19 2021 +0200
@@ -17,7 +17,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
 )
 
 from mercurial import (
@@ -115,7 +114,7 @@
             rsrc[ctx]
             for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
         )
-        revmap = {nullid: nullid}
+        revmap = {rsrc.nullid: rdst.nullid}
         if tolfile:
             # Lock destination to prevent modification while it is converted to.
             # Don't need to lock src because we are just reading from its
@@ -340,7 +339,7 @@
 # Generate list of changed files
 def _getchangedfiles(ctx, parents):
     files = set(ctx.files())
-    if nullid not in parents:
+    if ctx.repo().nullid not in parents:
         mc = ctx.manifest()
         for pctx in ctx.parents():
             for fn in pctx.manifest().diff(mc):
@@ -354,7 +353,7 @@
     for p in ctx.parents():
         parents.append(revmap[p.node()])
     while len(parents) < 2:
-        parents.append(nullid)
+        parents.append(ctx.repo().nullid)
     return parents
 
 
--- a/hgext/largefiles/lfutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/largefiles/lfutil.py	Mon May 03 18:55:19 2021 +0200
@@ -15,10 +15,7 @@
 import stat
 
 from mercurial.i18n import _
-from mercurial.node import (
-    hex,
-    nullid,
-)
+from mercurial.node import hex
 from mercurial.pycompat import open
 
 from mercurial import (
@@ -613,7 +610,7 @@
     ) as progress:
         for i, n in enumerate(missing):
             progress.update(i)
-            parents = [p for p in repo[n].parents() if p != nullid]
+            parents = [p for p in repo[n].parents() if p != repo.nullid]
 
             with lfstatus(repo, value=False):
                 ctx = repo[n]
--- a/hgext/lfs/wrapper.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/lfs/wrapper.py	Mon May 03 18:55:19 2021 +0200
@@ -10,7 +10,7 @@
 import hashlib
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid, short
+from mercurial.node import bin, hex, short
 from mercurial.pycompat import (
     getattr,
     setattr,
@@ -158,7 +158,7 @@
         rev = rlog.rev(node)
     else:
         node = rlog.node(rev)
-    if node == nullid:
+    if node == rlog.nullid:
         return False
     flags = rlog.flags(rev)
     return bool(flags & revlog.REVIDX_EXTSTORED)
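
The lfs wrapper hunk above compares against rlog.nullid, which works because storage objects now carry their own null node (compare the filelog constructor change in hgext/git/gitlog.py, which sets self.nullid = sha1nodeconstants.nullid). A minimal standalone sketch of the same call-site idiom; the helper name is illustrative, not from this changeset:

    from mercurial import revlog

    def stored_externally(rlog, node):
        # Ask the storage object for its own null node instead of importing
        # a hash-specific constant.
        if node == rlog.nullid:
            return False
        return bool(rlog.flags(rlog.rev(node)) & revlog.REVIDX_EXTSTORED)
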
--- a/hgext/mq.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/mq.py	Mon May 03 18:55:19 2021 +0200
@@ -73,7 +73,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -908,13 +907,13 @@
         """
         if rev is None:
             (p1, p2) = repo.dirstate.parents()
-            if p2 == nullid:
+            if p2 == repo.nullid:
                 return p1
             if not self.applied:
                 return None
             return self.applied[-1].node
         p1, p2 = repo.changelog.parents(rev)
-        if p2 != nullid and p2 in [x.node for x in self.applied]:
+        if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
             return p2
         return p1
 
@@ -1591,7 +1590,7 @@
             for hs in repo.branchmap().iterheads():
                 heads.extend(hs)
             if not heads:
-                heads = [nullid]
+                heads = [repo.nullid]
             if repo.dirstate.p1() not in heads and not exact:
                 self.ui.status(_(b"(working directory not at a head)\n"))
 
@@ -1857,7 +1856,7 @@
                         fctx = ctx[f]
                         repo.wwrite(f, fctx.data(), fctx.flags())
                         repo.dirstate.normal(f)
-                    repo.setparents(qp, nullid)
+                    repo.setparents(qp, repo.nullid)
             for patch in reversed(self.applied[start:end]):
                 self.ui.status(_(b"popping %s\n") % patch.name)
             del self.applied[start:end]
--- a/hgext/narrow/narrowbundle2.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/narrow/narrowbundle2.py	Mon May 03 18:55:19 2021 +0200
@@ -11,7 +11,6 @@
 import struct
 
 from mercurial.i18n import _
-from mercurial.node import nullid
 from mercurial import (
     bundle2,
     changegroup,
@@ -94,7 +93,7 @@
             raise error.Abort(_(b'depth must be positive, got %d') % depth)
 
     heads = set(heads or repo.heads())
-    common = set(common or [nullid])
+    common = set(common or [repo.nullid])
 
     visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), match, depth=depth
@@ -128,7 +127,7 @@
     common,
     known,
 ):
-    common = set(common or [nullid])
+    common = set(common or [repo.nullid])
     # Steps:
     # 1. Send kill for "$known & ::common"
     #
--- a/hgext/narrow/narrowcommands.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/narrow/narrowcommands.py	Mon May 03 18:55:19 2021 +0200
@@ -12,7 +12,6 @@
 from mercurial.i18n import _
 from mercurial.node import (
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -193,7 +192,7 @@
         kwargs[b'known'] = [
             hex(ctx.node())
             for ctx in repo.set(b'::%ln', pullop.common)
-            if ctx.node() != nullid
+            if ctx.node() != repo.nullid
         ]
         if not kwargs[b'known']:
             # Mercurial serializes an empty list as '' and deserializes it as
@@ -228,10 +227,17 @@
     unfi = repo.unfiltered()
     outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
     ui.status(_(b'looking for local changes to affected paths\n'))
+    progress = ui.makeprogress(
+        topic=_(b'changesets'),
+        unit=_(b'changesets'),
+        total=len(outgoing.missing) + len(outgoing.excluded),
+    )
     localnodes = []
-    for n in itertools.chain(outgoing.missing, outgoing.excluded):
-        if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
-            localnodes.append(n)
+    with progress:
+        for n in itertools.chain(outgoing.missing, outgoing.excluded):
+            progress.increment()
+            if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
+                localnodes.append(n)
     revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
     hiddenrevs = repoview.filterrevs(repo, b'visible')
     visibletostrip = list(
@@ -275,6 +281,10 @@
                 )
                 hg.clean(repo, urev)
             overrides = {(b'devel', b'strip-obsmarkers'): False}
+            if backup:
+                ui.status(_(b'moving unwanted changesets to backup\n'))
+            else:
+                ui.status(_(b'deleting unwanted changesets\n'))
             with ui.configoverride(overrides, b'narrow'):
                 repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
 
@@ -310,6 +320,7 @@
                 util.unlinkpath(repo.svfs.join(f))
                 repo.store.markremoved(f)
 
+            ui.status(_(b'deleting unwanted files from working copy\n'))
             narrowspec.updateworkingcopy(repo, assumeclean=True)
             narrowspec.copytoworkingcopy(repo)
 
@@ -370,7 +381,7 @@
             ds = repo.dirstate
             p1, p2 = ds.p1(), ds.p2()
             with ds.parentchange():
-                ds.setparents(nullid, nullid)
+                ds.setparents(repo.nullid, repo.nullid)
         if isoldellipses:
             with wrappedextraprepare:
                 exchange.pull(repo, remote, heads=common)
@@ -380,7 +391,7 @@
                 known = [
                     ctx.node()
                     for ctx in repo.set(b'::%ln', common)
-                    if ctx.node() != nullid
+                    if ctx.node() != repo.nullid
                 ]
             with remote.commandexecutor() as e:
                 bundle = e.callcommand(
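
The narrowcommands.py hunks above add progress reporting around the scan for local changes to affected paths. The ui.makeprogress idiom used there, shown in isolation (`nodes` and `check` are stand-ins for the outgoing changesets and the per-node oldmatch/newmatch file test):

    progress = ui.makeprogress(
        topic=b'changesets', unit=b'changesets', total=len(nodes)
    )
    with progress:
        for n in nodes:
            progress.increment()
            check(n)  # stand-in for the oldmatch/newmatch file test
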
--- a/hgext/phabricator.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/phabricator.py	Mon May 03 18:55:19 2021 +0200
@@ -69,7 +69,7 @@
 import re
 import time
 
-from mercurial.node import bin, nullid, short
+from mercurial.node import bin, short
 from mercurial.i18n import _
 from mercurial.pycompat import getattr
 from mercurial.thirdparty import attr
@@ -586,7 +586,7 @@
                 tags.tag(
                     repo,
                     tagname,
-                    nullid,
+                    repo.nullid,
                     message=None,
                     user=None,
                     date=None,
@@ -1606,7 +1606,7 @@
                         tags.tag(
                             repo,
                             tagname,
-                            nullid,
+                            repo.nullid,
                             message=None,
                             user=None,
                             date=None,
--- a/hgext/rebase.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/rebase.py	Mon May 03 18:55:19 2021 +0200
@@ -446,8 +446,15 @@
             rebaseset = set(destmap.keys())
             rebaseset -= set(self.obsolete_with_successor_in_destination)
             rebaseset -= self.obsolete_with_successor_in_rebase_set
+            # We have our own divergence-checking in the rebase extension
+            overrides = {}
+            if obsolete.isenabled(self.repo, obsolete.createmarkersopt):
+                overrides = {
+                    (b'experimental', b'evolution.allowdivergence'): b'true'
+                }
             try:
-                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
+                with self.ui.configoverride(overrides):
+                    rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
             except error.Abort as e:
                 if e.hint is None:
                     e.hint = _(b'use --keep to keep original changesets')
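
The rebase.py hunk above wraps rewriteutil.precheck in a temporary config override, since the rebase extension performs its own divergence checking. The scoped-override idiom, in isolation and with `repo`, `ui` and `rebaseset` as in the hunk:

    overrides = {}
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        # Silence the generic divergence check only for the duration of the
        # precheck; rebase re-checks divergence itself.
        overrides = {(b'experimental', b'evolution.allowdivergence'): b'true'}
    with ui.configoverride(overrides):
        rewriteutil.precheck(repo, rebaseset, action=b'rebase')
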
--- a/hgext/remotefilelog/contentstore.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/contentstore.py	Mon May 03 18:55:19 2021 +0200
@@ -2,7 +2,10 @@
 
 import threading
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from mercurial.pycompat import getattr
 from mercurial import (
     mdiff,
@@ -55,7 +58,7 @@
         """
         chain = self.getdeltachain(name, node)
 
-        if chain[-1][ChainIndicies.BASENODE] != nullid:
+        if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
             # If we didn't receive a full chain, throw
             raise KeyError((name, hex(node)))
 
@@ -92,7 +95,7 @@
         deltabasenode.
         """
         chain = self._getpartialchain(name, node)
-        while chain[-1][ChainIndicies.BASENODE] != nullid:
+        while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
             x, x, deltabasename, deltabasenode, x = chain[-1]
             try:
                 morechain = self._getpartialchain(deltabasename, deltabasenode)
@@ -187,7 +190,12 @@
         # Since remotefilelog content stores only contain full texts, just
         # return that.
         revision = self.get(name, node)
-        return revision, name, nullid, self.getmeta(name, node)
+        return (
+            revision,
+            name,
+            sha1nodeconstants.nullid,
+            self.getmeta(name, node),
+        )
 
     def getdeltachain(self, name, node):
         # Since remotefilelog content stores just contain full texts, we return
@@ -195,7 +203,7 @@
         # The nullid in the deltabasenode slot indicates that the revision is a
         # fulltext.
         revision = self.get(name, node)
-        return [(name, node, None, nullid, revision)]
+        return [(name, node, None, sha1nodeconstants.nullid, revision)]
 
     def getmeta(self, name, node):
         self._sanitizemetacache()
@@ -237,7 +245,12 @@
 
     def getdelta(self, name, node):
         revision = self.get(name, node)
-        return revision, name, nullid, self._shared.getmeta(name, node)
+        return (
+            revision,
+            name,
+            sha1nodeconstants.nullid,
+            self._shared.getmeta(name, node),
+        )
 
     def getdeltachain(self, name, node):
         # Since our remote content stores just contain full texts, we return a
@@ -245,7 +258,7 @@
         # The nullid in the deltabasenode slot indicates that the revision is a
         # fulltext.
         revision = self.get(name, node)
-        return [(name, node, None, nullid, revision)]
+        return [(name, node, None, sha1nodeconstants.nullid, revision)]
 
     def getmeta(self, name, node):
         self._fileservice.prefetch(
@@ -276,11 +289,11 @@
 
     def getdelta(self, name, node):
         revision = self.get(name, node)
-        return revision, name, nullid, self.getmeta(name, node)
+        return revision, name, self._cl.nullid, self.getmeta(name, node)
 
     def getdeltachain(self, name, node):
         revision = self.get(name, node)
-        return [(name, node, None, nullid, revision)]
+        return [(name, node, None, self._cl.nullid, revision)]
 
     def getmeta(self, name, node):
         rl = self._revlog(name)
@@ -304,9 +317,9 @@
             missing.discard(ancnode)
 
             p1, p2 = rl.parents(ancnode)
-            if p1 != nullid and p1 not in known:
+            if p1 != self._cl.nullid and p1 not in known:
                 missing.add(p1)
-            if p2 != nullid and p2 not in known:
+            if p2 != self._cl.nullid and p2 not in known:
                 missing.add(p2)
 
             linknode = self._cl.node(rl.linkrev(ancrev))
--- a/hgext/remotefilelog/datapack.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/datapack.py	Mon May 03 18:55:19 2021 +0200
@@ -3,7 +3,10 @@
 import struct
 import zlib
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from mercurial.i18n import _
 from mercurial import (
     pycompat,
@@ -458,7 +461,7 @@
         rawindex = b''
         fmt = self.INDEXFORMAT
         for node, deltabase, offset, size in entries:
-            if deltabase == nullid:
+            if deltabase == sha1nodeconstants.nullid:
                 deltabaselocation = FULLTEXTINDEXMARK
             else:
                 # Instead of storing the deltabase node in the index, let's
--- a/hgext/remotefilelog/debugcommands.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/debugcommands.py	Mon May 03 18:55:19 2021 +0200
@@ -12,7 +12,7 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
+    sha1nodeconstants,
     short,
 )
 from mercurial.i18n import _
@@ -57,9 +57,9 @@
             _(b"%s => %s  %s  %s  %s\n")
             % (short(node), short(p1), short(p2), short(linknode), copyfrom)
         )
-        if p1 != nullid:
+        if p1 != sha1nodeconstants.nullid:
             queue.append(p1)
-        if p2 != nullid:
+        if p2 != sha1nodeconstants.nullid:
             queue.append(p2)
 
 
@@ -152,7 +152,7 @@
             try:
                 pp = r.parents(node)
             except Exception:
-                pp = [nullid, nullid]
+                pp = [repo.nullid, repo.nullid]
             ui.write(
                 b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
                 % (
@@ -197,7 +197,7 @@
         node = r.node(i)
         pp = r.parents(node)
         ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
-        if pp[1] != nullid:
+        if pp[1] != repo.nullid:
             ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write(b"}\n")
 
@@ -212,7 +212,7 @@
             filepath = os.path.join(root, file)
             size, firstnode, mapping = parsefileblob(filepath, decompress)
             for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping):
-                if linknode == nullid:
+                if linknode == sha1nodeconstants.nullid:
                     actualpath = os.path.relpath(root, path)
                     key = fileserverclient.getcachekey(
                         b"reponame", actualpath, file
@@ -371,7 +371,7 @@
         current = node
         deltabase = bases[current]
 
-        while deltabase != nullid:
+        while deltabase != sha1nodeconstants.nullid:
             if deltabase not in nodes:
                 ui.warn(
                     (
@@ -397,7 +397,7 @@
             deltabase = bases[current]
         # Since ``node`` begins a valid chain, reset/memoize its base to nullid
         # so we don't traverse it again.
-        bases[node] = nullid
+        bases[node] = sha1nodeconstants.nullid
     return failures
 
 
--- a/hgext/remotefilelog/fileserverclient.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/fileserverclient.py	Mon May 03 18:55:19 2021 +0200
@@ -14,7 +14,7 @@
 import zlib
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
 from mercurial import (
     error,
     pycompat,
@@ -599,9 +599,13 @@
 
         # partition missing nodes into nullid and not-nullid so we can
         # warn about this filtering potentially shadowing bugs.
-        nullids = len([None for unused, id in missingids if id == nullid])
+        nullids = len(
+            [None for unused, id in missingids if id == self.repo.nullid]
+        )
         if nullids:
-            missingids = [(f, id) for f, id in missingids if id != nullid]
+            missingids = [
+                (f, id) for f, id in missingids if id != self.repo.nullid
+            ]
             repo.ui.develwarn(
                 (
                     b'remotefilelog not fetching %d null revs'
--- a/hgext/remotefilelog/historypack.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/historypack.py	Mon May 03 18:55:19 2021 +0200
@@ -2,7 +2,10 @@
 
 import struct
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from mercurial import (
     pycompat,
     util,
@@ -147,9 +150,9 @@
                 pending.remove(ancnode)
                 p1node = entry[ANC_P1NODE]
                 p2node = entry[ANC_P2NODE]
-                if p1node != nullid and p1node not in known:
+                if p1node != sha1nodeconstants.nullid and p1node not in known:
                     pending.add(p1node)
-                if p2node != nullid and p2node not in known:
+                if p2node != sha1nodeconstants.nullid and p2node not in known:
                     pending.add(p2node)
 
                 yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)
@@ -457,9 +460,9 @@
             def parentfunc(node):
                 x, p1, p2, x, x, x = entrymap[node]
                 parents = []
-                if p1 != nullid:
+                if p1 != sha1nodeconstants.nullid:
                     parents.append(p1)
-                if p2 != nullid:
+                if p2 != sha1nodeconstants.nullid:
                     parents.append(p2)
                 return parents
 
--- a/hgext/remotefilelog/metadatastore.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/metadatastore.py	Mon May 03 18:55:19 2021 +0200
@@ -1,6 +1,9 @@
 from __future__ import absolute_import
 
-from mercurial.node import hex, nullid
+from mercurial.node import (
+    hex,
+    sha1nodeconstants,
+)
 from . import (
     basestore,
     shallowutil,
@@ -51,9 +54,9 @@
                     missing.append((name, node))
                     continue
                 p1, p2, linknode, copyfrom = value
-                if p1 != nullid and p1 not in known:
+                if p1 != sha1nodeconstants.nullid and p1 not in known:
                     queue.append((copyfrom or curname, p1))
-                if p2 != nullid and p2 not in known:
+                if p2 != sha1nodeconstants.nullid and p2 not in known:
                     queue.append((curname, p2))
             return missing
 
--- a/hgext/remotefilelog/remotefilectx.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/remotefilectx.py	Mon May 03 18:55:19 2021 +0200
@@ -9,7 +9,7 @@
 import collections
 import time
 
-from mercurial.node import bin, hex, nullid, nullrev
+from mercurial.node import bin, hex, nullrev
 from mercurial import (
     ancestor,
     context,
@@ -35,7 +35,7 @@
         ancestormap=None,
     ):
         if fileid == nullrev:
-            fileid = nullid
+            fileid = repo.nullid
         if fileid and len(fileid) == 40:
             fileid = bin(fileid)
         super(remotefilectx, self).__init__(
@@ -78,7 +78,7 @@
 
     @propertycache
     def _linkrev(self):
-        if self._filenode == nullid:
+        if self._filenode == self._repo.nullid:
             return nullrev
 
         ancestormap = self.ancestormap()
@@ -174,7 +174,7 @@
 
         p1, p2, linknode, copyfrom = ancestormap[self._filenode]
         results = []
-        if p1 != nullid:
+        if p1 != repo.nullid:
             path = copyfrom or self._path
             flog = repo.file(path)
             p1ctx = remotefilectx(
@@ -183,7 +183,7 @@
             p1ctx._descendantrev = self.rev()
             results.append(p1ctx)
 
-        if p2 != nullid:
+        if p2 != repo.nullid:
             path = self._path
             flog = repo.file(path)
             p2ctx = remotefilectx(
@@ -504,25 +504,25 @@
             if renamed:
                 p1 = renamed
             else:
-                p1 = (path, pcl[0]._manifest.get(path, nullid))
+                p1 = (path, pcl[0]._manifest.get(path, self._repo.nullid))
 
-            p2 = (path, nullid)
+            p2 = (path, self._repo.nullid)
             if len(pcl) > 1:
-                p2 = (path, pcl[1]._manifest.get(path, nullid))
+                p2 = (path, pcl[1]._manifest.get(path, self._repo.nullid))
 
             m = {}
-            if p1[1] != nullid:
+            if p1[1] != self._repo.nullid:
                 p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
                 m.update(p1ctx.filelog().ancestormap(p1[1]))
 
-            if p2[1] != nullid:
+            if p2[1] != self._repo.nullid:
                 p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
                 m.update(p2ctx.filelog().ancestormap(p2[1]))
 
             copyfrom = b''
             if renamed:
                 copyfrom = renamed[0]
-            m[None] = (p1[1], p2[1], nullid, copyfrom)
+            m[None] = (p1[1], p2[1], self._repo.nullid, copyfrom)
             self._ancestormap = m
 
         return self._ancestormap
--- a/hgext/remotefilelog/remotefilelog.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/remotefilelog.py	Mon May 03 18:55:19 2021 +0200
@@ -10,12 +10,7 @@
 import collections
 import os
 
-from mercurial.node import (
-    bin,
-    nullid,
-    wdirfilenodeids,
-    wdirid,
-)
+from mercurial.node import bin
 from mercurial.i18n import _
 from mercurial import (
     ancestor,
@@ -100,7 +95,7 @@
 
         pancestors = {}
         queue = []
-        if realp1 != nullid:
+        if realp1 != self.repo.nullid:
             p1flog = self
             if copyfrom:
                 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
@@ -108,7 +103,7 @@
             pancestors.update(p1flog.ancestormap(realp1))
             queue.append(realp1)
             visited.add(realp1)
-        if p2 != nullid:
+        if p2 != self.repo.nullid:
             pancestors.update(self.ancestormap(p2))
             queue.append(p2)
             visited.add(p2)
@@ -129,10 +124,10 @@
                 pacopyfrom,
             )
 
-            if pa1 != nullid and pa1 not in visited:
+            if pa1 != self.repo.nullid and pa1 not in visited:
                 queue.append(pa1)
                 visited.add(pa1)
-            if pa2 != nullid and pa2 not in visited:
+            if pa2 != self.repo.nullid and pa2 not in visited:
                 queue.append(pa2)
                 visited.add(pa2)
 
@@ -238,7 +233,7 @@
         returns True if text is different than what is stored.
         """
 
-        if node == nullid:
+        if node == self.repo.nullid:
             return True
 
         nodetext = self.read(node)
@@ -275,13 +270,13 @@
         return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
 
     def parents(self, node):
-        if node == nullid:
-            return nullid, nullid
+        if node == self.repo.nullid:
+            return self.repo.nullid, self.repo.nullid
 
         ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
         p1, p2, linknode, copyfrom = ancestormap[node]
         if copyfrom:
-            p1 = nullid
+            p1 = self.repo.nullid
 
         return p1, p2
 
@@ -317,8 +312,8 @@
             if prevnode is None:
                 basenode = prevnode = p1
             if basenode == node:
-                basenode = nullid
-            if basenode != nullid:
+                basenode = self.repo.nullid
+            if basenode != self.repo.nullid:
                 revision = None
                 delta = self.revdiff(basenode, node)
             else:
@@ -380,13 +375,16 @@
         this is generally only used for bundling and communicating with vanilla
         hg clients.
         """
-        if node == nullid:
+        if node == self.repo.nullid:
             return b""
         if len(node) != 20:
             raise error.LookupError(
                 node, self.filename, _(b'invalid revision input')
             )
-        if node == wdirid or node in wdirfilenodeids:
+        if (
+            node == self.repo.nodeconstants.wdirid
+            or node in self.repo.nodeconstants.wdirfilenodeids
+        ):
             raise error.WdirUnsupported
 
         store = self.repo.contentstore
@@ -432,8 +430,8 @@
         return self.repo.metadatastore.getancestors(self.filename, node)
 
     def ancestor(self, a, b):
-        if a == nullid or b == nullid:
-            return nullid
+        if a == self.repo.nullid or b == self.repo.nullid:
+            return self.repo.nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -442,13 +440,13 @@
         if ancs:
             # choose a consistent winner when there's a tie
             return min(map(nodemap.__getitem__, ancs))
-        return nullid
+        return self.repo.nullid
 
     def commonancestorsheads(self, a, b):
         """calculate all the heads of the common ancestors of nodes a and b"""
 
-        if a == nullid or b == nullid:
-            return nullid
+        if a == self.repo.nullid or b == self.repo.nullid:
+            return self.repo.nullid
 
         revmap, parentfunc = self._buildrevgraph(a, b)
         nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -472,10 +470,10 @@
                 p1, p2, linknode, copyfrom = pdata
                 # Don't follow renames (copyfrom).
                 # remotefilectx.ancestor does that.
-                if p1 != nullid and not copyfrom:
+                if p1 != self.repo.nullid and not copyfrom:
                     parents.append(p1)
                     allparents.add(p1)
-                if p2 != nullid:
+                if p2 != self.repo.nullid:
                     parents.append(p2)
                     allparents.add(p2)
 
--- a/hgext/remotefilelog/remotefilelogserver.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/remotefilelogserver.py	Mon May 03 18:55:19 2021 +0200
@@ -13,7 +13,7 @@
 import zlib
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
 from mercurial.pycompat import open
 from mercurial import (
     changegroup,
@@ -242,7 +242,7 @@
     filecachepath = os.path.join(cachepath, path, hex(node))
     if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
         filectx = repo.filectx(path, fileid=node)
-        if filectx.node() == nullid:
+        if filectx.node() == repo.nullid:
             repo.changelog = changelog.changelog(repo.svfs)
             filectx = repo.filectx(path, fileid=node)
 
@@ -284,7 +284,7 @@
     """A server api for requesting a filelog's heads"""
     flog = repo.file(path)
     heads = flog.heads()
-    return b'\n'.join((hex(head) for head in heads if head != nullid))
+    return b'\n'.join((hex(head) for head in heads if head != repo.nullid))
 
 
 def getfile(repo, proto, file, node):
@@ -302,7 +302,7 @@
     if not cachepath:
         cachepath = os.path.join(repo.path, b"remotefilelogcache")
     node = bin(node.strip())
-    if node == nullid:
+    if node == repo.nullid:
         return b'0\0'
     return b'0\0' + _loadfileblob(repo, cachepath, file, node)
 
@@ -327,7 +327,7 @@
                 break
 
             node = bin(request[:40])
-            if node == nullid:
+            if node == repo.nullid:
                 yield b'0\n'
                 continue
 
@@ -380,8 +380,8 @@
         ancestortext = b""
         for ancestorctx in ancestors:
             parents = ancestorctx.parents()
-            p1 = nullid
-            p2 = nullid
+            p1 = repo.nullid
+            p2 = repo.nullid
             if len(parents) > 0:
                 p1 = parents[0].filenode()
             if len(parents) > 1:
--- a/hgext/remotefilelog/repack.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/repack.py	Mon May 03 18:55:19 2021 +0200
@@ -4,10 +4,7 @@
 import time
 
 from mercurial.i18n import _
-from mercurial.node import (
-    nullid,
-    short,
-)
+from mercurial.node import short
 from mercurial import (
     encoding,
     error,
@@ -586,7 +583,7 @@
         # Create one contiguous chain and reassign deltabases.
         for i, node in enumerate(orphans):
             if i == 0:
-                deltabases[node] = (nullid, 0)
+                deltabases[node] = (self.repo.nullid, 0)
             else:
                 parent = orphans[i - 1]
                 deltabases[node] = (parent, deltabases[parent][1] + 1)
@@ -676,8 +673,8 @@
                 # of immediate child
                 deltatuple = deltabases.get(node, None)
                 if deltatuple is None:
-                    deltabase, chainlen = nullid, 0
-                    deltabases[node] = (nullid, 0)
+                    deltabase, chainlen = self.repo.nullid, 0
+                    deltabases[node] = (self.repo.nullid, 0)
                     nobase.add(node)
                 else:
                     deltabase, chainlen = deltatuple
@@ -692,7 +689,7 @@
                     # file was copied from elsewhere. So don't attempt to do any
                     # deltas with the other file.
                     if copyfrom:
-                        p1 = nullid
+                        p1 = self.repo.nullid
 
                     if chainlen < maxchainlen:
                         # Record this child as the delta base for its parents.
@@ -700,9 +697,9 @@
                         # many children, and this will only choose the last one.
                         # TODO: record all children and try all deltas to find
                         # best
-                        if p1 != nullid:
+                        if p1 != self.repo.nullid:
                             deltabases[p1] = (node, chainlen + 1)
-                        if p2 != nullid:
+                        if p2 != self.repo.nullid:
                             deltabases[p2] = (node, chainlen + 1)
 
             # experimental config: repack.chainorphansbysize
@@ -719,7 +716,7 @@
                 # TODO: Optimize the deltachain fetching. Since we're
                 # iterating over the different version of the file, we may
                 # be fetching the same deltachain over and over again.
-                if deltabase != nullid:
+                if deltabase != self.repo.nullid:
                     deltaentry = self.data.getdelta(filename, node)
                     delta, deltabasename, origdeltabase, meta = deltaentry
                     size = meta.get(constants.METAKEYSIZE)
@@ -791,9 +788,9 @@
                     # If copyfrom == filename, it means the copy history
                     # went to come other file, then came back to this one, so we
                     # should continue processing it.
-                    if p1 != nullid and copyfrom != filename:
+                    if p1 != self.repo.nullid and copyfrom != filename:
                         dontprocess.add(p1)
-                    if p2 != nullid:
+                    if p2 != self.repo.nullid:
                         dontprocess.add(p2)
                     continue
 
@@ -814,9 +811,9 @@
         def parentfunc(node):
             p1, p2, linknode, copyfrom = ancestors[node]
             parents = []
-            if p1 != nullid:
+            if p1 != self.repo.nullid:
                 parents.append(p1)
-            if p2 != nullid:
+            if p2 != self.repo.nullid:
                 parents.append(p2)
             return parents
 
--- a/hgext/remotefilelog/shallowbundle.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/shallowbundle.py	Mon May 03 18:55:19 2021 +0200
@@ -7,7 +7,7 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
 from mercurial import (
     bundlerepo,
     changegroup,
@@ -143,7 +143,7 @@
 
     def nodechunk(self, revlog, node, prevnode, linknode):
         prefix = b''
-        if prevnode == nullid:
+        if prevnode == revlog.nullid:
             delta = revlog.rawdata(node)
             prefix = mdiff.trivialdiffheader(len(delta))
         else:
@@ -245,7 +245,7 @@
     processed = set()
 
     def available(f, node, depf, depnode):
-        if depnode != nullid and (depf, depnode) not in processed:
+        if depnode != repo.nullid and (depf, depnode) not in processed:
             if not (depf, depnode) in revisiondatas:
                 # It's not in the changegroup, assume it's already
                 # in the repo
@@ -267,7 +267,7 @@
         dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
 
         for dependent in dependents:
-            if dependent == nullid or (f, dependent) in revisiondatas:
+            if dependent == repo.nullid or (f, dependent) in revisiondatas:
                 continue
             prefetchfiles.append((f, hex(dependent)))
 
@@ -306,7 +306,7 @@
                 continue
 
         for p in [p1, p2]:
-            if p != nullid:
+            if p != repo.nullid:
                 if not available(f, node, f, p):
                     continue
 
--- a/hgext/remotefilelog/shallowrepo.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/remotefilelog/shallowrepo.py	Mon May 03 18:55:19 2021 +0200
@@ -9,7 +9,7 @@
 import os
 
 from mercurial.i18n import _
-from mercurial.node import hex, nullid, nullrev
+from mercurial.node import hex, nullrev
 from mercurial import (
     encoding,
     error,
@@ -206,8 +206,8 @@
                 m1 = ctx.p1().manifest()
                 files = []
                 for f in ctx.modified() + ctx.added():
-                    fparent1 = m1.get(f, nullid)
-                    if fparent1 != nullid:
+                    fparent1 = m1.get(f, self.nullid)
+                    if fparent1 != self.nullid:
                         files.append((f, hex(fparent1)))
                 self.fileservice.prefetch(files)
             return super(shallowrepository, self).commitctx(
--- a/hgext/sqlitestore.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/sqlitestore.py	Mon May 03 18:55:19 2021 +0200
@@ -52,7 +52,6 @@
 
 from mercurial.i18n import _
 from mercurial.node import (
-    nullid,
     nullrev,
     sha1nodeconstants,
     short,
@@ -366,12 +365,12 @@
                 )
 
             if p1rev == nullrev:
-                p1node = nullid
+                p1node = sha1nodeconstants.nullid
             else:
                 p1node = self._revtonode[p1rev]
 
             if p2rev == nullrev:
-                p2node = nullid
+                p2node = sha1nodeconstants.nullid
             else:
                 p2node = self._revtonode[p2rev]
 
@@ -400,7 +399,7 @@
         return iter(pycompat.xrange(len(self._revisions)))
 
     def hasnode(self, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             return False
 
         return node in self._nodetorev
@@ -411,8 +410,8 @@
         )
 
     def parents(self, node):
-        if node == nullid:
-            return nullid, nullid
+        if node == sha1nodeconstants.nullid:
+            return sha1nodeconstants.nullid, sha1nodeconstants.nullid
 
         if node not in self._revisions:
             raise error.LookupError(node, self._path, _(b'no node'))
@@ -431,7 +430,7 @@
         return entry.p1rev, entry.p2rev
 
     def rev(self, node):
-        if node == nullid:
+        if node == sha1nodeconstants.nullid:
             return nullrev
 
         if node not in self._nodetorev:
@@ -441,7 +440,7 @@
 
     def node(self, rev):
         if rev == nullrev:
-            return nullid
+            return sha1nodeconstants.nullid
 
         if rev not in self._revtonode:
             raise IndexError(rev)
@@ -485,7 +484,7 @@
     def heads(self, start=None, stop=None):
         if start is None and stop is None:
             if not len(self):
-                return [nullid]
+                return [sha1nodeconstants.nullid]
 
         startrev = self.rev(start) if start is not None else nullrev
         stoprevs = {self.rev(n) for n in stop or []}
@@ -529,7 +528,7 @@
         return len(self.revision(node))
 
     def revision(self, node, raw=False, _verifyhash=True):
-        if node in (nullid, nullrev):
+        if node in (sha1nodeconstants.nullid, nullrev):
             return b''
 
         if isinstance(node, int):
@@ -596,7 +595,7 @@
                 b'unhandled value for nodesorder: %s' % nodesorder
             )
 
-        nodes = [n for n in nodes if n != nullid]
+        nodes = [n for n in nodes if n != sha1nodeconstants.nullid]
 
         if not nodes:
             return
@@ -705,12 +704,12 @@
                 raise SQLiteStoreError(b'unhandled revision flag')
 
             if maybemissingparents:
-                if p1 != nullid and not self.hasnode(p1):
-                    p1 = nullid
+                if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
+                    p1 = sha1nodeconstants.nullid
                     storeflags |= FLAG_MISSING_P1
 
-                if p2 != nullid and not self.hasnode(p2):
-                    p2 = nullid
+                if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
+                    p2 = sha1nodeconstants.nullid
                     storeflags |= FLAG_MISSING_P2
 
             baserev = self.rev(deltabase)
@@ -736,7 +735,10 @@
                 # Possibly reset parents to make them proper.
                 entry = self._revisions[node]
 
-                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
+                if (
+                    entry.flags & FLAG_MISSING_P1
+                    and p1 != sha1nodeconstants.nullid
+                ):
                     entry.p1node = p1
                     entry.p1rev = self._nodetorev[p1]
                     entry.flags &= ~FLAG_MISSING_P1
@@ -746,7 +748,10 @@
                         (self._nodetorev[p1], entry.flags, entry.rid),
                     )
 
-                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
+                if (
+                    entry.flags & FLAG_MISSING_P2
+                    and p2 != sha1nodeconstants.nullid
+                ):
                     entry.p2node = p2
                     entry.p2rev = self._nodetorev[p2]
                     entry.flags &= ~FLAG_MISSING_P2
@@ -761,7 +766,7 @@
                 empty = False
                 continue
 
-            if deltabase == nullid:
+            if deltabase == sha1nodeconstants.nullid:
                 text = mdiff.patch(b'', delta)
                 storedelta = None
             else:
@@ -1012,7 +1017,7 @@
             assert revisiondata is not None
             deltabase = p1
 
-            if deltabase == nullid:
+            if deltabase == sha1nodeconstants.nullid:
                 delta = revisiondata
             else:
                 delta = mdiff.textdiff(
@@ -1021,7 +1026,7 @@
 
         # File index stores a pointer to its delta and the parent delta.
         # The parent delta is stored via a pointer to the fileindex PK.
-        if deltabase == nullid:
+        if deltabase == sha1nodeconstants.nullid:
             baseid = None
         else:
             baseid = self._revisions[deltabase].rid
@@ -1055,12 +1060,12 @@
 
         rev = len(self)
 
-        if p1 == nullid:
+        if p1 == sha1nodeconstants.nullid:
             p1rev = nullrev
         else:
             p1rev = self._nodetorev[p1]
 
-        if p2 == nullid:
+        if p2 == sha1nodeconstants.nullid:
             p2rev = nullrev
         else:
             p2rev = self._nodetorev[p2]
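
The hunks above replace the module-level nullid with the value published on
sha1nodeconstants, since the null node is now tied to the hash algorithm in
use. A minimal sketch of the resulting idiom (the helper below is
hypothetical, not part of this patch):

    from mercurial.node import sha1nodeconstants

    def has_no_parents(p1node, p2node):
        # compare against the SHA-1 null node published on sha1nodeconstants
        # instead of the removed module-level nullid constant
        null = sha1nodeconstants.nullid
        return p1node == null and p2node == null
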
--- a/hgext/transplant.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/transplant.py	Mon May 03 18:55:19 2021 +0200
@@ -22,7 +22,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     short,
 )
 from mercurial import (
@@ -134,6 +133,7 @@
 class transplanter(object):
     def __init__(self, ui, repo, opts):
         self.ui = ui
+        self.repo = repo
         self.path = repo.vfs.join(b'transplant')
         self.opener = vfsmod.vfs(self.path)
         self.transplants = transplants(
@@ -221,7 +221,7 @@
                         exchange.pull(repo, source.peer(), heads=[node])
 
                 skipmerge = False
-                if parents[1] != nullid:
+                if parents[1] != repo.nullid:
                     if not opts.get(b'parent'):
                         self.ui.note(
                             _(b'skipping merge changeset %d:%s\n')
@@ -516,7 +516,7 @@
     def parselog(self, fp):
         parents = []
         message = []
-        node = nullid
+        node = self.repo.nullid
         inmsg = False
         user = None
         date = None
@@ -568,7 +568,7 @@
         def matchfn(node):
             if self.applied(repo, node, root):
                 return False
-            if source.changelog.parents(node)[1] != nullid:
+            if source.changelog.parents(node)[1] != repo.nullid:
                 return False
             extra = source.changelog.read(node)[5]
             cnode = extra.get(b'transplant_source')
@@ -804,7 +804,7 @@
     tp = transplanter(ui, repo, opts)
 
     p1 = repo.dirstate.p1()
-    if len(repo) > 0 and p1 == nullid:
+    if len(repo) > 0 and p1 == repo.nullid:
         raise error.Abort(_(b'no revision checked out'))
     if opts.get(b'continue'):
         if not tp.canresume():
--- a/hgext/uncommit.py	Sat May 01 00:28:39 2021 -0400
+++ b/hgext/uncommit.py	Mon May 03 18:55:19 2021 +0200
@@ -20,7 +20,6 @@
 from __future__ import absolute_import
 
 from mercurial.i18n import _
-from mercurial.node import nullid
 
 from mercurial import (
     cmdutil,
@@ -113,7 +112,7 @@
 
     new = context.memctx(
         repo,
-        parents=[base.node(), nullid],
+        parents=[base.node(), repo.nullid],
         text=message,
         files=files,
         filectxfn=filectxfn,
--- a/mercurial/bookmarks.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/bookmarks.py	Mon May 03 18:55:19 2021 +0200
@@ -15,7 +15,6 @@
     bin,
     hex,
     short,
-    wdirid,
 )
 from .pycompat import getattr
 from . import (
@@ -642,7 +641,7 @@
     binarydata = []
     for book, node in bookmarks:
         if not node:  # None or ''
-            node = wdirid
+            node = repo.nodeconstants.wdirid
         binarydata.append(_binaryentry.pack(node, len(book)))
         binarydata.append(book)
     return b''.join(binarydata)
@@ -674,7 +673,7 @@
         if len(bookmark) < length:
             if entry:
                 raise error.Abort(_(b'bad bookmark stream'))
-        if node == wdirid:
+        if node == repo.nodeconstants.wdirid:
             node = None
         books.append((bookmark, node))
     return books
--- a/mercurial/branchmap.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/branchmap.py	Mon May 03 18:55:19 2021 +0200
@@ -12,7 +12,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from . import (
@@ -189,7 +188,7 @@
         self,
         repo,
         entries=(),
-        tipnode=nullid,
+        tipnode=None,
         tiprev=nullrev,
         filteredhash=None,
         closednodes=None,
@@ -200,7 +199,10 @@
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog"""
         self._repo = repo
-        self.tipnode = tipnode
+        if tipnode is None:
+            self.tipnode = repo.nullid
+        else:
+            self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
@@ -536,7 +538,7 @@
 
         if not self.validfor(repo):
             # cache key are not valid anymore
-            self.tipnode = nullid
+            self.tipnode = repo.nullid
             self.tiprev = nullrev
             for heads in self.iterheads():
                 tiprev = max(cl.rev(node) for node in heads)
--- a/mercurial/bundle2.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/bundle2.py	Mon May 03 18:55:19 2021 +0200
@@ -158,7 +158,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     short,
 )
 from . import (
@@ -2576,7 +2575,7 @@
             fullnodes=commonnodes,
         )
         cgdata = packer.generate(
-            {nullid},
+            {repo.nullid},
             list(commonnodes),
             False,
             b'narrow_widen',
--- a/mercurial/bundlerepo.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/bundlerepo.py	Mon May 03 18:55:19 2021 +0200
@@ -19,7 +19,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
 )
 
@@ -447,7 +446,9 @@
         return encoding.getcwd()  # always outside the repo
 
     # Check if parents exist in localrepo before setting
-    def setparents(self, p1, p2=nullid):
+    def setparents(self, p1, p2=None):
+        if p2 is None:
+            p2 = self.nullid
         p1rev = self.changelog.rev(p1)
         p2rev = self.changelog.rev(p2)
         msg = _(b"setting parent to node %s that only exists in the bundle\n")
--- a/mercurial/cext/manifest.c	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/cext/manifest.c	Mon May 03 18:55:19 2021 +0200
@@ -28,6 +28,7 @@
 typedef struct {
 	PyObject_HEAD
 	PyObject *pydata;
+	Py_ssize_t nodelen;
 	line *lines;
 	int numlines; /* number of line entries */
 	int livelines; /* number of non-deleted lines */
@@ -49,12 +50,11 @@
 }
 
 /* get the node value of a single line */
-static PyObject *nodeof(line *l, char *flag)
+static PyObject *nodeof(Py_ssize_t nodelen, line *l, char *flag)
 {
 	char *s = l->start;
 	Py_ssize_t llen = pathlen(l);
 	Py_ssize_t hlen = l->len - llen - 2;
-	Py_ssize_t hlen_raw;
 	PyObject *hash;
 	if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */
 		PyErr_SetString(PyExc_ValueError, "manifest line too short");
@@ -73,36 +73,29 @@
 		break;
 	}
 
-	switch (hlen) {
-	case 40: /* sha1 */
-		hlen_raw = 20;
-		break;
-	case 64: /* new hash */
-		hlen_raw = 32;
-		break;
-	default:
+	if (hlen != 2 * nodelen) {
 		PyErr_SetString(PyExc_ValueError, "invalid node length in manifest");
 		return NULL;
 	}
-	hash = unhexlify(s + llen + 1, hlen_raw * 2);
+	hash = unhexlify(s + llen + 1, nodelen * 2);
 	if (!hash) {
 		return NULL;
 	}
 	if (l->hash_suffix != '\0') {
 		char newhash[33];
-		memcpy(newhash, PyBytes_AsString(hash), hlen_raw);
+		memcpy(newhash, PyBytes_AsString(hash), nodelen);
 		Py_DECREF(hash);
-		newhash[hlen_raw] = l->hash_suffix;
-		hash = PyBytes_FromStringAndSize(newhash, hlen_raw+1);
+		newhash[nodelen] = l->hash_suffix;
+		hash = PyBytes_FromStringAndSize(newhash, nodelen + 1);
 	}
 	return hash;
 }
 
 /* get the node hash and flags of a line as a tuple */
-static PyObject *hashflags(line *l)
+static PyObject *hashflags(Py_ssize_t nodelen, line *l)
 {
 	char flag;
-	PyObject *hash = nodeof(l, &flag);
+	PyObject *hash = nodeof(nodelen, l, &flag);
 	PyObject *flags;
 	PyObject *tup;
 
@@ -190,17 +183,23 @@
 static int lazymanifest_init(lazymanifest *self, PyObject *args)
 {
 	char *data;
-	Py_ssize_t len;
+	Py_ssize_t nodelen, len;
 	int err, ret;
 	PyObject *pydata;
 
 	lazymanifest_init_early(self);
-	if (!PyArg_ParseTuple(args, "S", &pydata)) {
+	if (!PyArg_ParseTuple(args, "nS", &nodelen, &pydata)) {
 		return -1;
 	}
-	err = PyBytes_AsStringAndSize(pydata, &data, &len);
+	if (nodelen != 20 && nodelen != 32) {
+		/* See fixed buffer in nodeof */
+		PyErr_Format(PyExc_ValueError, "Unsupported node length");
+		return -1;
+	}
+	self->nodelen = nodelen;
+	self->dirty = false;
 
-	self->dirty = false;
+	err = PyBytes_AsStringAndSize(pydata, &data, &len);
 	if (err == -1)
 		return -1;
 	self->pydata = pydata;
@@ -291,17 +290,18 @@
 
 static PyObject *lmiter_iterentriesnext(PyObject *o)
 {
+	lmIter *self = (lmIter *)o;
 	Py_ssize_t pl;
 	line *l;
 	char flag;
 	PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL;
-	l = lmiter_nextline((lmIter *)o);
+	l = lmiter_nextline(self);
 	if (!l) {
 		goto done;
 	}
 	pl = pathlen(l);
 	path = PyBytes_FromStringAndSize(l->start, pl);
-	hash = nodeof(l, &flag);
+	hash = nodeof(self->m->nodelen, l, &flag);
 	if (!path || !hash) {
 		goto done;
 	}
@@ -471,7 +471,7 @@
 		PyErr_Format(PyExc_KeyError, "No such manifest entry.");
 		return NULL;
 	}
-	return hashflags(hit);
+	return hashflags(self->nodelen, hit);
 }
 
 static int lazymanifest_delitem(lazymanifest *self, PyObject *key)
@@ -568,13 +568,13 @@
 	pyhash = PyTuple_GetItem(value, 0);
 	if (!PyBytes_Check(pyhash)) {
 		PyErr_Format(PyExc_TypeError,
-			     "node must be a 20 or 32 bytes string");
+			     "node must be a %zi bytes string", self->nodelen);
 		return -1;
 	}
 	hlen = PyBytes_Size(pyhash);
-	if (hlen != 20 && hlen != 32) {
+	if (hlen != self->nodelen) {
 		PyErr_Format(PyExc_TypeError,
-			     "node must be a 20 or 32 bytes string");
+			     "node must be a %zi bytes string", self->nodelen);
 		return -1;
 	}
 	hash = PyBytes_AsString(pyhash);
@@ -739,6 +739,7 @@
 		goto nomem;
 	}
 	lazymanifest_init_early(copy);
+	copy->nodelen = self->nodelen;
 	copy->numlines = self->numlines;
 	copy->livelines = self->livelines;
 	copy->dirty = false;
@@ -777,6 +778,7 @@
 		goto nomem;
 	}
 	lazymanifest_init_early(copy);
+	copy->nodelen = self->nodelen;
 	copy->dirty = true;
 	copy->lines = malloc(self->maxlines * sizeof(line));
 	if (!copy->lines) {
@@ -872,7 +874,7 @@
 		if (!key)
 			goto nomem;
 		if (result < 0) {
-			PyObject *l = hashflags(left);
+			PyObject *l = hashflags(self->nodelen, left);
 			if (!l) {
 				goto nomem;
 			}
@@ -885,7 +887,7 @@
 			Py_DECREF(outer);
 			sneedle++;
 		} else if (result > 0) {
-			PyObject *r = hashflags(right);
+			PyObject *r = hashflags(self->nodelen, right);
 			if (!r) {
 				goto nomem;
 			}
@@ -902,12 +904,12 @@
 			if (left->len != right->len
 			    || memcmp(left->start, right->start, left->len)
 			    || left->hash_suffix != right->hash_suffix) {
-				PyObject *l = hashflags(left);
+				PyObject *l = hashflags(self->nodelen, left);
 				PyObject *r;
 				if (!l) {
 					goto nomem;
 				}
-				r = hashflags(right);
+				r = hashflags(self->nodelen, right);
 				if (!r) {
 					Py_DECREF(l);
 					goto nomem;
--- a/mercurial/cext/parsers.c	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/cext/parsers.c	Mon May 03 18:55:19 2021 +0200
@@ -668,7 +668,7 @@
 void manifest_module_init(PyObject *mod);
 void revlog_module_init(PyObject *mod);
 
-static const int version = 17;
+static const int version = 18;
 
 static void module_init(PyObject *mod)
 {
--- a/mercurial/cext/parsers.pyi	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/cext/parsers.pyi	Mon May 03 18:55:19 2021 +0200
@@ -29,7 +29,7 @@
 
 # From manifest.c
 class lazymanifest:
-    def __init__(self, data: bytes): ...
+    def __init__(self, nodelen: int, data: bytes): ...
     def __iter__(self) -> Iterator[bytes]: ...
 
     def __len__(self) -> int: ...
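
With this signature change the C lazymanifest must be told the node length up
front (20 for SHA-1, 32 reserved for the larger hash). A hedged usage sketch,
assuming the C extension module is built and importable:

    from mercurial.cext import parsers

    # one manifest line is "<path>\0<hex node><flags>\n"; with nodelen=20 the
    # hex part must be exactly 40 characters or the parser raises ValueError
    data = b"foo.txt\x00" + b"ab" * 20 + b"\n"
    lm = parsers.lazymanifest(20, data)
    node, flags = lm[b"foo.txt"]
    assert len(node) == 20 and flags == b""
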
--- a/mercurial/cext/revlog.c	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/cext/revlog.c	Mon May 03 18:55:19 2021 +0200
@@ -342,6 +342,46 @@
 		                     sidedata_offset, sidedata_comp_len);
 	}
 }
+/*
+ * Pack header information in binary
+ */
+static PyObject *index_pack_header(indexObject *self, PyObject *args)
+{
+	int header;
+	char out[4];
+	if (!PyArg_ParseTuple(args, "I", &header)) {
+		return NULL;
+	}
+	putbe32(header, out);
+	return PyBytes_FromStringAndSize(out, 4);
+}
+/*
+ * Return the raw binary string representing a revision
+ */
+static PyObject *index_entry_binary(indexObject *self, PyObject *value)
+{
+	long rev;
+	const char *data;
+	Py_ssize_t length = index_length(self);
+
+	if (!pylong_to_long(value, &rev)) {
+		return NULL;
+	}
+	if (rev < 0 || rev >= length) {
+		PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
+		             rev);
+		return NULL;
+	};
+
+	data = index_deref(self, rev);
+	if (data == NULL)
+		return NULL;
+	if (rev == 0) {
+		/* the header is eating the start of the first entry */
+		return PyBytes_FromStringAndSize(data + 4, self->hdrsize - 4);
+	}
+	return PyBytes_FromStringAndSize(data, self->hdrsize);
+}
 
 /*
  * Return the hash of node corresponding to the given rev.
@@ -2859,6 +2899,10 @@
     {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
      "find length of shortest hex nodeid of a binary ID"},
     {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
+    {"entry_binary", (PyCFunction)index_entry_binary, METH_O,
+     "return an entry in binary form"},
+    {"pack_header", (PyCFunction)index_pack_header, METH_VARARGS,
+     "pack the revlog header information into binary"},
     {NULL} /* Sentinel */
 };
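
For reference, a pure-Python sketch of what the new pack_header method
computes; the C implementation above is just the putbe32 call, and
entry_binary returns the corresponding fixed-size slice of the index (minus
those four header bytes for revision 0):

    import struct

    def pack_header(header):
        # the revlog index header is a single 32-bit big-endian integer that
        # combines the format version with its feature flags
        return struct.pack(">I", header)

    assert pack_header(1) == b"\x00\x00\x00\x01"
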
 
--- a/mercurial/changegroup.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/changegroup.py	Mon May 03 18:55:19 2021 +0200
@@ -15,7 +15,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -673,7 +672,7 @@
 
     if delta.delta is not None:
         prefix, data = b'', delta.delta
-    elif delta.basenode == nullid:
+    elif delta.basenode == repo.nullid:
         data = delta.revision
         prefix = mdiff.trivialdiffheader(len(data))
     else:
--- a/mercurial/changelog.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/changelog.py	Mon May 03 18:55:19 2021 +0200
@@ -11,7 +11,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
 )
 from .thirdparty import attr
 
@@ -221,7 +220,7 @@
 
     def __new__(cls, cl, text, sidedata, cpsd):
         if not text:
-            return _changelogrevision(extra=_defaultextra, manifest=nullid)
+            return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
 
         self = super(changelogrevision, cls).__new__(cls)
         # We could return here and implement the following as an __init__.
--- a/mercurial/cmdutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/cmdutil.py	Mon May 03 18:55:19 2021 +0200
@@ -15,7 +15,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -998,11 +997,6 @@
                 _(b"a branch of the same name already exists")
             )
 
-        if repo.revs(b'obsolete() and %ld', revs):
-            raise error.InputError(
-                _(b"cannot change branch of a obsolete changeset")
-            )
-
         # make sure only topological heads
         if repo.revs(b'heads(%ld) - head()', revs):
             raise error.InputError(
@@ -1097,7 +1091,7 @@
     'hint' is the usual hint given to Abort exception.
     """
 
-    if merge and repo.dirstate.p2() != nullid:
+    if merge and repo.dirstate.p2() != repo.nullid:
         raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)
     st = repo.status()
     if st.modified or st.added or st.removed or st.deleted:
@@ -2104,7 +2098,7 @@
     if parents:
         prev = parents[0]
     else:
-        prev = nullid
+        prev = repo.nullid
 
     fm.context(ctx=ctx)
     fm.plain(b'# HG changeset patch\n')
@@ -2967,7 +2961,7 @@
         ms.reset()
 
         # Reroute the working copy parent to the new changeset
-        repo.setparents(newid, nullid)
+        repo.setparents(newid, repo.nullid)
 
         # Fixing the dirstate because localrepo.commitctx does not update
         # it. This is rather convenient because we did not need to update
@@ -3322,7 +3316,7 @@
 
         # in case of merge, files that are actually added can be reported as
         # modified, we need to post process the result
-        if p2 != nullid:
+        if p2 != repo.nullid:
             mergeadd = set(dsmodified)
             for path in dsmodified:
                 if path in mf:
@@ -3593,7 +3587,7 @@
         # We're reverting to our parent. If possible, we'd like status
         # to report the file as clean. We have to use normallookup for
         # merges to avoid losing information about merged/dirty files.
-        if p2 != nullid:
+        if p2 != repo.nullid:
             normal = repo.dirstate.normallookup
         else:
             normal = repo.dirstate.normal
@@ -3690,7 +3684,7 @@
             repo.dirstate.add(f)
 
     normal = repo.dirstate.normallookup
-    if node == parent and p2 == nullid:
+    if node == parent and p2 == repo.nullid:
         normal = repo.dirstate.normal
     for f in actions[b'undelete'][0]:
         if interactive:
--- a/mercurial/commands.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/commands.py	Mon May 03 18:55:19 2021 +0200
@@ -15,10 +15,8 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
     short,
-    wdirhex,
     wdirrev,
 )
 from .pycompat import open
@@ -486,7 +484,7 @@
                     return b'%d ' % rev
 
         def formathex(h):
-            if h == wdirhex:
+            if h == repo.nodeconstants.wdirhex:
                 return b'%s+' % shorthex(hex(ctx.p1().node()))
             else:
                 return b'%s ' % shorthex(h)
@@ -809,9 +807,9 @@
         )
 
     p1, p2 = repo.changelog.parents(node)
-    if p1 == nullid:
+    if p1 == repo.nullid:
         raise error.InputError(_(b'cannot backout a change with no parents'))
-    if p2 != nullid:
+    if p2 != repo.nullid:
         if not opts.get(b'parent'):
             raise error.InputError(_(b'cannot backout a merge changeset'))
         p = repo.lookup(opts[b'parent'])
@@ -1085,7 +1083,7 @@
                 )
         else:
             node, p2 = repo.dirstate.parents()
-            if p2 != nullid:
+            if p2 != repo.nullid:
                 raise error.StateError(_(b'current bisect revision is a merge'))
         if rev:
             if not nodes:
@@ -4847,7 +4845,7 @@
 
     opts = pycompat.byteskwargs(opts)
     abort = opts.get(b'abort')
-    if abort and repo.dirstate.p2() == nullid:
+    if abort and repo.dirstate.p2() == repo.nullid:
         cmdutil.wrongtooltocontinue(repo, _(b'merge'))
     cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
     if abort:
@@ -5072,7 +5070,7 @@
 
     displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
     for n in p:
-        if n != nullid:
+        if n != repo.nullid:
             displayer.show(repo[n])
     displayer.close()
 
@@ -5128,15 +5126,9 @@
     """
 
     opts = pycompat.byteskwargs(opts)
+
+    pathitems = urlutil.list_paths(ui, search)
     ui.pager(b'paths')
-    if search:
-        pathitems = [
-            (name, path)
-            for name, path in pycompat.iteritems(ui.paths)
-            if name == search
-        ]
-    else:
-        pathitems = sorted(pycompat.iteritems(ui.paths))
 
     fm = ui.formatter(b'paths', opts)
     if fm.isplain():
@@ -6105,7 +6097,7 @@
     with repo.wlock():
         ms = mergestatemod.mergestate.read(repo)
 
-        if not (ms.active() or repo.dirstate.p2() != nullid):
+        if not (ms.active() or repo.dirstate.p2() != repo.nullid):
             raise error.StateError(
                 _(b'resolve command not applicable when not merging')
             )
@@ -6223,7 +6215,7 @@
                     raise
 
         ms.commit()
-        branchmerge = repo.dirstate.p2() != nullid
+        branchmerge = repo.dirstate.p2() != repo.nullid
         mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
 
         if not didwork and pats:
@@ -6315,7 +6307,7 @@
         opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
 
     parent, p2 = repo.dirstate.parents()
-    if not opts.get(b'rev') and p2 != nullid:
+    if not opts.get(b'rev') and p2 != repo.nullid:
         # revert after merge is a trap for new users (issue2915)
         raise error.InputError(
             _(b'uncommitted merge with no revision specified'),
@@ -6335,7 +6327,7 @@
         or opts.get(b'interactive')
     ):
         msg = _(b"no files or directories specified")
-        if p2 != nullid:
+        if p2 != repo.nullid:
             hint = _(
                 b"uncommitted merge, use --all to discard all changes,"
                 b" or 'hg update -C .' to abort the merge"
@@ -7396,7 +7388,7 @@
             for n in names:
                 if repo.tagtype(n) == b'global':
                     alltags = tagsmod.findglobaltags(ui, repo)
-                    if alltags[n][0] == nullid:
+                    if alltags[n][0] == repo.nullid:
                         raise error.InputError(
                             _(b"tag '%s' is already removed") % n
                         )
@@ -7423,7 +7415,7 @@
                     )
         if not opts.get(b'local'):
             p1, p2 = repo.dirstate.parents()
-            if p2 != nullid:
+            if p2 != repo.nullid:
                 raise error.StateError(_(b'uncommitted merge'))
             bheads = repo.branchheads()
             if not opts.get(b'force') and bheads and p1 not in bheads:
--- a/mercurial/commit.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/commit.py	Mon May 03 18:55:19 2021 +0200
@@ -10,7 +10,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
 )
 
@@ -277,10 +276,10 @@
     """
 
     fname = fctx.path()
-    fparent1 = manifest1.get(fname, nullid)
-    fparent2 = manifest2.get(fname, nullid)
+    fparent1 = manifest1.get(fname, repo.nullid)
+    fparent2 = manifest2.get(fname, repo.nullid)
     touched = None
-    if fparent1 == fparent2 == nullid:
+    if fparent1 == fparent2 == repo.nullid:
         touched = 'added'
 
     if isinstance(fctx, context.filectx):
@@ -291,9 +290,11 @@
         if node in [fparent1, fparent2]:
             repo.ui.debug(b'reusing %s filelog entry\n' % fname)
             if (
-                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+                fparent1 != repo.nullid
+                and manifest1.flags(fname) != fctx.flags()
             ) or (
-                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+                fparent2 != repo.nullid
+                and manifest2.flags(fname) != fctx.flags()
             ):
                 touched = 'modified'
             return node, touched
@@ -327,7 +328,9 @@
         newfparent = fparent2
 
         if manifest2:  # branch merge
-            if fparent2 == nullid or cnode is None:  # copied on remote side
+            if (
+                fparent2 == repo.nullid or cnode is None
+            ):  # copied on remote side
                 if cfname in manifest2:
                     cnode = manifest2[cfname]
                     newfparent = fparent1
@@ -346,7 +349,7 @@
             if includecopymeta:
                 meta[b"copy"] = cfname
                 meta[b"copyrev"] = hex(cnode)
-            fparent1, fparent2 = nullid, newfparent
+            fparent1, fparent2 = repo.nullid, newfparent
         else:
             repo.ui.warn(
                 _(
@@ -356,20 +359,20 @@
                 % (fname, cfname)
             )
 
-    elif fparent1 == nullid:
-        fparent1, fparent2 = fparent2, nullid
-    elif fparent2 != nullid:
+    elif fparent1 == repo.nullid:
+        fparent1, fparent2 = fparent2, repo.nullid
+    elif fparent2 != repo.nullid:
         if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other':
-            fparent1, fparent2 = fparent2, nullid
+            fparent1, fparent2 = fparent2, repo.nullid
         elif ms.active() and ms.extras(fname).get(b'merged') != b'yes':
-            fparent1, fparent2 = fparent1, nullid
+            fparent1, fparent2 = fparent1, repo.nullid
         # is one parent an ancestor of the other?
         else:
             fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
             if fparent1 in fparentancestors:
-                fparent1, fparent2 = fparent2, nullid
+                fparent1, fparent2 = fparent2, repo.nullid
             elif fparent2 in fparentancestors:
-                fparent2 = nullid
+                fparent2 = repo.nullid
 
     force_new_node = False
     # The file might have been deleted by merge code and user explicitly choose
@@ -384,9 +387,14 @@
         force_new_node = True
     # is the file changed?
     text = fctx.data()
-    if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
+    if (
+        fparent2 != repo.nullid
+        or meta
+        or flog.cmp(fparent1, text)
+        or force_new_node
+    ):
         if touched is None:  # do not overwrite added
-            if fparent2 == nullid:
+            if fparent2 == repo.nullid:
                 touched = 'modified'
             else:
                 touched = 'merged'
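
The ancestry check near the end of the hunk above collapses redundant file
parents: if one filelog parent is an ancestor of the other, it carries no
extra information and is dropped. A condensed sketch of that rule (the
function name is hypothetical; flog is any filelog-like object providing
commonancestorsheads):

    def collapse_file_parents(flog, nullid, fparent1, fparent2):
        # mirrors the ancestry-based simplification in _filecommit: a parent
        # that is an ancestor of the other parent is replaced by the null node
        ancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in ancestors:
            return fparent2, nullid
        if fparent2 in ancestors:
            return fparent1, nullid
        return fparent1, fparent2
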
--- a/mercurial/context.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/context.py	Mon May 03 18:55:19 2021 +0200
@@ -14,14 +14,9 @@
 
 from .i18n import _
 from .node import (
-    addednodeid,
     hex,
-    modifiednodeid,
-    nullid,
     nullrev,
     short,
-    wdirfilenodeids,
-    wdirhex,
 )
 from .pycompat import (
     getattr,
@@ -140,7 +135,7 @@
                 removed.append(fn)
             elif flag1 != flag2:
                 modified.append(fn)
-            elif node2 not in wdirfilenodeids:
+            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                 # When comparing files between two commits, we save time by
                 # not comparing the file contents when the nodeids differ.
                 # Note that this means we incorrectly report a reverted change
@@ -737,7 +732,7 @@
             n2 = c2._parents[0]._node
         cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
         if not cahs:
-            anc = nullid
+            anc = self._repo.nodeconstants.nullid
         elif len(cahs) == 1:
             anc = cahs[0]
         else:
@@ -1132,7 +1127,11 @@
         _path = self._path
         fl = self._filelog
         parents = self._filelog.parents(self._filenode)
-        pl = [(_path, node, fl) for node in parents if node != nullid]
+        pl = [
+            (_path, node, fl)
+            for node in parents
+            if node != self._repo.nodeconstants.nullid
+        ]
 
         r = fl.renamed(self._filenode)
         if r:
@@ -1556,12 +1555,12 @@
         return self._repo.dirstate[key] not in b"?r"
 
     def hex(self):
-        return wdirhex
+        return self._repo.nodeconstants.wdirhex
 
     @propertycache
     def _parents(self):
         p = self._repo.dirstate.parents()
-        if p[1] == nullid:
+        if p[1] == self._repo.nodeconstants.nullid:
             p = p[:-1]
         # use unfiltered repo to delay/avoid loading obsmarkers
         unfi = self._repo.unfiltered()
@@ -1572,7 +1571,9 @@
             for n in p
         ]
 
-    def setparents(self, p1node, p2node=nullid):
+    def setparents(self, p1node, p2node=None):
+        if p2node is None:
+            p2node = self._repo.nodeconstants.nullid
         dirstate = self._repo.dirstate
         with dirstate.parentchange():
             copies = dirstate.setparents(p1node, p2node)
@@ -1584,7 +1585,7 @@
                 for f in copies:
                     if f not in pctx and copies[f] in pctx:
                         dirstate.copy(copies[f], f)
-            if p2node == nullid:
+            if p2node == self._repo.nodeconstants.nullid:
                 for f, s in sorted(dirstate.copies().items()):
                     if f not in pctx and s not in pctx:
                         dirstate.copy(None, f)
@@ -1944,8 +1945,8 @@
 
         ff = self._flagfunc
         for i, l in (
-            (addednodeid, status.added),
-            (modifiednodeid, status.modified),
+            (self._repo.nodeconstants.addednodeid, status.added),
+            (self._repo.nodeconstants.modifiednodeid, status.modified),
         ):
             for f in l:
                 man[f] = i
@@ -2070,13 +2071,18 @@
         path = self.copysource()
         if not path:
             return None
-        return path, self._changectx._parents[0]._manifest.get(path, nullid)
+        return (
+            path,
+            self._changectx._parents[0]._manifest.get(
+                path, self._repo.nodeconstants.nullid
+            ),
+        )
 
     def parents(self):
         '''return parent filectxs, following copies if necessary'''
 
         def filenode(ctx, path):
-            return ctx._manifest.get(path, nullid)
+            return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
 
         path = self._path
         fl = self._filelog
@@ -2094,7 +2100,7 @@
         return [
             self._parentfilectx(p, fileid=n, filelog=l)
             for p, n, l in pl
-            if n != nullid
+            if n != self._repo.nodeconstants.nullid
         ]
 
     def children(self):
@@ -2222,7 +2228,9 @@
         # ``overlayworkingctx`` (e.g. with --collapse).
         util.clearcachedproperty(self, b'_manifest')
 
-    def setparents(self, p1node, p2node=nullid):
+    def setparents(self, p1node, p2node=None):
+        if p2node is None:
+            p2node = self._repo.nodeconstants.nullid
         assert p1node == self._wrappedctx.node()
         self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
 
@@ -2248,10 +2256,10 @@
 
         flag = self._flagfunc
         for path in self.added():
-            man[path] = addednodeid
+            man[path] = self._repo.nodeconstants.addednodeid
             man.setflag(path, flag(path))
         for path in self.modified():
-            man[path] = modifiednodeid
+            man[path] = self._repo.nodeconstants.modifiednodeid
             man.setflag(path, flag(path))
         for path in self.removed():
             del man[path]
@@ -2827,7 +2835,7 @@
         )
         self._rev = None
         self._node = None
-        parents = [(p or nullid) for p in parents]
+        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
         p1, p2 = parents
         self._parents = [self._repo[p] for p in (p1, p2)]
         files = sorted(set(files))
@@ -2866,10 +2874,10 @@
         man = pctx.manifest().copy()
 
         for f in self._status.modified:
-            man[f] = modifiednodeid
+            man[f] = self._repo.nodeconstants.modifiednodeid
 
         for f in self._status.added:
-            man[f] = addednodeid
+            man[f] = self._repo.nodeconstants.addednodeid
 
         for f in self._status.removed:
             if f in man:
@@ -3006,12 +3014,12 @@
         # sanity check to ensure that the reused manifest parents are
         # manifests of our commit parents
         mp1, mp2 = self.manifestctx().parents
-        if p1 != nullid and p1.manifestnode() != mp1:
+        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
             raise RuntimeError(
                 r"can't reuse the manifest: its p1 "
                 r"doesn't match the new ctx p1"
             )
-        if p2 != nullid and p2.manifestnode() != mp2:
+        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
             raise RuntimeError(
                 r"can't reuse the manifest: "
                 r"its p2 doesn't match the new ctx p2"
--- a/mercurial/copies.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/copies.py	Mon May 03 18:55:19 2021 +0200
@@ -12,10 +12,7 @@
 import os
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 
 from . import (
     match as matchmod,
@@ -579,7 +576,7 @@
             parents = fctx._filelog.parents(fctx._filenode)
             nb_parents = 0
             for n in parents:
-                if n != nullid:
+                if n != repo.nullid:
                     nb_parents += 1
             return nb_parents >= 2
 
--- a/mercurial/debugcommands.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/debugcommands.py	Mon May 03 18:55:19 2021 +0200
@@ -30,7 +30,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -1667,7 +1666,7 @@
         node = r.node(i)
         pp = r.parents(node)
         ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
-        if pp[1] != nullid:
+        if pp[1] != repo.nullid:
             ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
     ui.write(b"}\n")
 
@@ -1675,7 +1674,7 @@
 @command(b'debugindexstats', [])
 def debugindexstats(ui, repo):
     """show stats related to the changelog index"""
-    repo.changelog.shortest(nullid, 1)
+    repo.changelog.shortest(repo.nullid, 1)
     index = repo.changelog.index
     if not util.safehasattr(index, b'stats'):
         raise error.Abort(_(b'debugindexstats only works with native code'))
@@ -2425,7 +2424,7 @@
             # arbitrary node identifiers, possibly not present in the
             # local repository.
             n = bin(s)
-            if len(n) != len(nullid):
+            if len(n) != repo.nodeconstants.nodelen:
                 raise TypeError()
             return n
         except TypeError:
@@ -3328,7 +3327,7 @@
             try:
                 pp = r.parents(node)
             except Exception:
-                pp = [nullid, nullid]
+                pp = [repo.nullid, repo.nullid]
             if ui.verbose:
                 ui.write(
                     b"% 6d % 9d % 7d % 7d %s %s %s\n"
@@ -3742,7 +3741,9 @@
         for n in chlist:
             if limit is not None and count >= limit:
                 break
-            parents = [True for p in other.changelog.parents(n) if p != nullid]
+            parents = [
+                True for p in other.changelog.parents(n) if p != repo.nullid
+            ]
             if opts.get(b"no_merges") and len(parents) == 2:
                 continue
             count += 1
--- a/mercurial/dirstate.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/dirstate.py	Mon May 03 18:55:19 2021 +0200
@@ -14,7 +14,6 @@
 import stat
 
 from .i18n import _
-from .node import nullid
 from .pycompat import delattr
 
 from hgdemandimport import tracing
@@ -314,7 +313,7 @@
     def branch(self):
         return encoding.tolocal(self._branch)
 
-    def setparents(self, p1, p2=nullid):
+    def setparents(self, p1, p2=None):
         """Set dirstate parents to p1 and p2.
 
         When moving from two parents to one, 'm' merged entries a
@@ -323,6 +322,8 @@
 
         See localrepo.setparents()
         """
+        if p2 is None:
+            p2 = self._nodeconstants.nullid
         if self._parentwriters == 0:
             raise ValueError(
                 b"cannot set dirstate parent outside of "
@@ -335,7 +336,10 @@
             self._origpl = self._pl
         self._map.setparents(p1, p2)
         copies = {}
-        if oldp2 != nullid and p2 == nullid:
+        if (
+            oldp2 != self._nodeconstants.nullid
+            and p2 == self._nodeconstants.nullid
+        ):
             candidatefiles = self._map.nonnormalset.union(
                 self._map.otherparentset
             )
@@ -459,7 +463,7 @@
 
     def normallookup(self, f):
         '''Mark a file normal, but possibly dirty.'''
-        if self._pl[1] != nullid:
+        if self._pl[1] != self._nodeconstants.nullid:
             # if there is a merge going on and the file was either
             # in state 'm' (-1) or coming from other parent (-2) before
             # being removed, restore that state.
@@ -481,7 +485,7 @@
 
     def otherparent(self, f):
         '''Mark as coming from the other parent, always dirty.'''
-        if self._pl[1] == nullid:
+        if self._pl[1] == self._nodeconstants.nullid:
             raise error.Abort(
                 _(b"setting %r to other parent only allowed in merges") % f
             )
@@ -503,7 +507,7 @@
         self._dirty = True
         oldstate = self[f]
         size = 0
-        if self._pl[1] != nullid:
+        if self._pl[1] != self._nodeconstants.nullid:
             entry = self._map.get(f)
             if entry is not None:
                 # backup the previous state
@@ -519,7 +523,7 @@
 
     def merge(self, f):
         '''Mark a file merged.'''
-        if self._pl[1] == nullid:
+        if self._pl[1] == self._nodeconstants.nullid:
             return self.normallookup(f)
         return self.otherparent(f)
 
@@ -638,7 +642,7 @@
 
         if self._origpl is None:
             self._origpl = self._pl
-        self._map.setparents(parent, nullid)
+        self._map.setparents(parent, self._nodeconstants.nullid)
 
         for f in to_lookup:
             self.normallookup(f)
@@ -1459,7 +1463,7 @@
     def clear(self):
         self._map.clear()
         self.copymap.clear()
-        self.setparents(nullid, nullid)
+        self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
         util.clearcachedproperty(self, b"_dirs")
         util.clearcachedproperty(self, b"_alldirs")
         util.clearcachedproperty(self, b"filefoldmap")
@@ -1636,7 +1640,10 @@
                     st[self._nodelen : 2 * self._nodelen],
                 )
             elif l == 0:
-                self._parents = (nullid, nullid)
+                self._parents = (
+                    self._nodeconstants.nullid,
+                    self._nodeconstants.nullid,
+                )
             else:
                 raise error.Abort(
                     _(b'working directory state appears damaged!')
@@ -1794,7 +1801,9 @@
         def clear(self):
             self._rustmap.clear()
             self._inner_rustmap.clear()
-            self.setparents(nullid, nullid)
+            self.setparents(
+                self._nodeconstants.nullid, self._nodeconstants.nullid
+            )
             util.clearcachedproperty(self, b"_dirs")
             util.clearcachedproperty(self, b"_alldirs")
             util.clearcachedproperty(self, b"dirfoldmap")
--- a/mercurial/discovery.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/discovery.py	Mon May 03 18:55:19 2021 +0200
@@ -12,7 +12,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     short,
 )
 
@@ -107,7 +106,7 @@
         if missingroots:
             discbases = []
             for n in missingroots:
-                discbases.extend([p for p in cl.parents(n) if p != nullid])
+                discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
@@ -116,7 +115,7 @@
             ancestorsof = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
-            commonheads = [nullid]
+            commonheads = [repo.nullid]
         self.commonheads = commonheads
         self.ancestorsof = ancestorsof
         self._revlog = cl
@@ -381,7 +380,7 @@
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
-    if remoteheads == [nullid]:
+    if remoteheads == [repo.nullid]:
         # remote is empty, nothing to check.
         return
 
--- a/mercurial/dispatch.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/dispatch.py	Mon May 03 18:55:19 2021 +0200
@@ -1064,6 +1064,16 @@
     if req.earlyoptions[b'profile']:
         for ui_ in uis:
             ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
+    elif req.earlyoptions[b'profile'] is False:
+        # Check for it being set already, so that we don't pollute the config
+        # with this when using chg in the very common case that it's not
+        # enabled.
+        if lui.configbool(b'profiling', b'enabled'):
+            # Only do this on lui so that `chg foo` with a user config setting
+            # profiling.enabled=1 still shows profiling information (chg will
+            # specify `--no-profile` when `hg serve` is starting up; we don't
+            # want that to propagate to every later invocation).
+            lui.setconfig(b'profiling', b'enabled', b'false', b'--no-profile')
 
     profile = lui.configbool(b'profiling', b'enabled')
     with profiling.profile(lui, enabled=profile) as profiler:
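
The early profile flag handled above is effectively tri-state: True when
--profile was passed, False when chg passes --no-profile, and unset otherwise.
A condensed sketch of the decision (names are illustrative):

    def apply_profile_flag(lui, profile_flag):
        # profile_flag: True (--profile), False (--no-profile), None (absent)
        if profile_flag:
            lui.setconfig(b'profiling', b'enabled', b'true', b'--profile')
        elif profile_flag is False and lui.configbool(b'profiling', b'enabled'):
            # only force profiling off when the config would otherwise enable
            # it, so the common chg case leaves the config untouched
            lui.setconfig(b'profiling', b'enabled', b'false', b'--no-profile')
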
--- a/mercurial/exchange.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/exchange.py	Mon May 03 18:55:19 2021 +0200
@@ -13,7 +13,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     nullrev,
 )
 from . import (
@@ -164,7 +163,7 @@
         hasnode = cl.hasnode
         common = [n for n in common if hasnode(n)]
     else:
-        common = [nullid]
+        common = [repo.nullid]
     if not heads:
         heads = cl.heads()
     return discovery.outgoing(repo, common, heads)
@@ -1839,7 +1838,7 @@
     if (
         pullop.remote.capable(b'clonebundles')
         and pullop.heads is None
-        and list(pullop.common) == [nullid]
+        and list(pullop.common) == [pullop.repo.nullid]
     ):
         kwargs[b'cbattempted'] = pullop.clonebundleattempted
 
@@ -1849,7 +1848,7 @@
         pullop.repo.ui.status(_(b"no changes found\n"))
         pullop.cgresult = 0
     else:
-        if pullop.heads is None and list(pullop.common) == [nullid]:
+        if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
             pullop.repo.ui.status(_(b"requesting all changes\n"))
     if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
         remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
@@ -1920,7 +1919,7 @@
         pullop.cgresult = 0
         return
     tr = pullop.gettransaction()
-    if pullop.heads is None and list(pullop.common) == [nullid]:
+    if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
         pullop.repo.ui.status(_(b"requesting all changes\n"))
     elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
         # issue1320, avoid a race if remote changed after discovery
--- a/mercurial/exchangev2.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/exchangev2.py	Mon May 03 18:55:19 2021 +0200
@@ -11,10 +11,7 @@
 import weakref
 
 from .i18n import _
-from .node import (
-    nullid,
-    short,
-)
+from .node import short
 from . import (
     bookmarks,
     error,
@@ -304,7 +301,7 @@
         if set(remoteheads).issubset(common):
             fetch = []
 
-    common.discard(nullid)
+    common.discard(repo.nullid)
 
     return common, fetch, remoteheads
 
@@ -413,7 +410,7 @@
                 # Linknode is always itself for changesets.
                 cset[b'node'],
                 # We always send full revisions. So delta base is not set.
-                nullid,
+                repo.nullid,
                 mdiff.trivialdiffheader(len(data)) + data,
                 # Flags not yet supported.
                 0,
@@ -478,7 +475,7 @@
                 basenode = manifest[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
-                basenode = nullid
+                basenode = repo.nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
@@ -610,7 +607,7 @@
                 basenode = filerevision[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
-                basenode = nullid
+                basenode = repo.nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
@@ -705,7 +702,7 @@
                 basenode = filerevision[b'deltabasenode']
                 delta = extrafields[b'delta']
             elif b'revision' in extrafields:
-                basenode = nullid
+                basenode = repo.nullid
                 revision = extrafields[b'revision']
                 delta = mdiff.trivialdiffheader(len(revision)) + revision
             else:
--- a/mercurial/filelog.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/filelog.py	Mon May 03 18:55:19 2021 +0200
@@ -8,10 +8,7 @@
 from __future__ import absolute_import
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from . import (
     error,
     revlog,
@@ -42,7 +39,7 @@
         return self._revlog.__iter__()
 
     def hasnode(self, node):
-        if node in (nullid, nullrev):
+        if node in (self.nullid, nullrev):
             return False
 
         try:
--- a/mercurial/filemerge.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/filemerge.py	Mon May 03 18:55:19 2021 +0200
@@ -15,7 +15,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
     short,
 )
 from .pycompat import (
@@ -111,7 +110,7 @@
         return None
 
     def filenode(self):
-        return nullid
+        return self._ctx.repo().nullid
 
     _customcmp = True
 
--- a/mercurial/help.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/help.py	Mon May 03 18:55:19 2021 +0200
@@ -540,6 +540,12 @@
             TOPIC_CATEGORY_CONCEPTS,
         ),
         (
+            [b"evolution"],
+            _(b"Safely rewriting history (EXPERIMENTAL)"),
+            loaddoc(b'evolution'),
+            TOPIC_CATEGORY_CONCEPTS,
+        ),
+        (
             [b'scripting'],
             _(b'Using Mercurial from scripts and automation'),
             loaddoc(b'scripting'),
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/evolution.txt	Mon May 03 18:55:19 2021 +0200
@@ -0,0 +1,56 @@
+Obsolescence markers make it possible to mark changesets that have been
+deleted or superseded by a new version of the changeset.
+
+Unlike the previous way of handling such changes, by stripping the old
+changesets from the repository, obsolescence markers can be propagated
+between repositories. This allows for a safe and simple way of exchanging
+mutable history and altering it after the fact. Changeset phases are
+respected, such that only draft and secret changesets can be altered (see
+:hg:`help phases` for details).
+
+Obsolescence is tracked using "obsolescence markers", pieces of metadata
+tracking which changesets have been made obsolete, potential successors for
+a given changeset, the moment the changeset was marked as obsolete, and the
+user who performed the rewriting operation. The markers are stored separately
+from standard changeset data and can be exchanged without any of the
+precursor changesets, preventing unnecessary exchange of obsolescence data.
+
+The complete set of obsolescence markers describes a history of changeset
+modifications that is orthogonal to the repository history of file
+modifications. This changeset history allows for detection and automatic
+resolution of edge cases arising from multiple users rewriting the same part
+of history concurrently.
+
+Current feature status
+======================
+
+This feature is still in development.
+
+Instability
+===========
+
+Rewriting changesets might introduce instability.
+
+There are two main kinds of instability: orphaning and diverging.
+
+Orphans are changesets left behind when their ancestors are rewritten.
+Divergence has two variants:
+
+* Content-divergence occurs when independent rewrites of the same changesets
+  lead to different results.
+
+* Phase-divergence occurs when the old (obsolete) version of a changeset
+  becomes public.
+
+It is possible to prevent local creation of orphans by using the following config::
+
+    [experimental]
+    evolution.createmarkers = true
+    evolution.exchange = true
+
+You can also explicitly allow the creation of orphans::
+
+    [experimental]
+    evolution.createmarkers = true
+    evolution.exchange = true
+    evolution.allowunstable = true
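
To complement the help text above, a minimal sketch (assuming an
already-loaded repository object repo) of walking the raw obsolescence markers
it describes; the exact tuple layout is an internal detail, but the first two
fields are the precursor node and its successors:

    from mercurial.node import hex

    def iter_markers(repo):
        # repo.obsstore iterates over raw marker tuples; field 0 is the
        # obsoleted (precursor) node, field 1 the tuple of successor nodes
        # (empty when the changeset was simply pruned)
        for marker in repo.obsstore:
            precursor, successors = marker[0], marker[1]
            yield hex(precursor), [hex(s) for s in successors]
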
--- a/mercurial/hg.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/hg.py	Mon May 03 18:55:19 2021 +0200
@@ -16,8 +16,7 @@
 from .i18n import _
 from .node import (
     hex,
-    nullhex,
-    nullid,
+    sha1nodeconstants,
     short,
 )
 from .pycompat import getattr
@@ -772,7 +771,7 @@
                             },
                         ).result()
 
-                    if rootnode != nullid:
+                    if rootnode != sha1nodeconstants.nullid:
                         sharepath = os.path.join(sharepool, hex(rootnode))
                     else:
                         ui.status(
@@ -883,7 +882,9 @@
             # we need to re-init the repo after manually copying the data
             # into it
             destpeer = peer(srcrepo, peeropts, dest)
-            srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
+            srcrepo.hook(
+                b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
+            )
         else:
             try:
                 # only pass ui when no srcrepo
@@ -1329,7 +1330,9 @@
         for n in chlist:
             if limit is not None and count >= limit:
                 break
-            parents = [p for p in other.changelog.parents(n) if p != nullid]
+            parents = [
+                p for p in other.changelog.parents(n) if p != repo.nullid
+            ]
             if opts.get(b'no_merges') and len(parents) == 2:
                 continue
             count += 1
@@ -1406,7 +1409,7 @@
     for n in revs:
         if limit is not None and count >= limit:
             break
-        parents = [p for p in cl.parents(n) if p != nullid]
+        parents = [p for p in cl.parents(n) if p != repo.nullid]
         if no_merges and len(parents) == 2:
             continue
         count += 1
--- a/mercurial/hgweb/webutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/hgweb/webutil.py	Mon May 03 18:55:19 2021 +0200
@@ -14,7 +14,7 @@
 import re
 
 from ..i18n import _
-from ..node import hex, nullid, short
+from ..node import hex, short
 from ..pycompat import setattr
 
 from .common import (
@@ -220,7 +220,7 @@
 def _siblings(siblings=None, hiderev=None):
     if siblings is None:
         siblings = []
-    siblings = [s for s in siblings if s.node() != nullid]
+    siblings = [s for s in siblings if s.node() != s.repo().nullid]
     if len(siblings) == 1 and siblings[0].rev() == hiderev:
         siblings = []
     return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
@@ -316,12 +316,16 @@
         yield {name: t}
 
 
-def showtag(repo, t1, node=nullid):
+def showtag(repo, t1, node=None):
+    if node is None:
+        node = repo.nullid
     args = (repo.nodetags, node, b'tag')
     return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
 
 
-def showbookmark(repo, t1, node=nullid):
+def showbookmark(repo, t1, node=None):
+    if node is None:
+        node = repo.nullid
     args = (repo.nodebookmarks, node, b'bookmark')
     return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
 
--- a/mercurial/interfaces/dirstate.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/interfaces/dirstate.py	Mon May 03 18:55:19 2021 +0200
@@ -2,8 +2,6 @@
 
 import contextlib
 
-from .. import node as nodemod
-
 from . import util as interfaceutil
 
 
@@ -97,7 +95,7 @@
     def branch():
         pass
 
-    def setparents(p1, p2=nodemod.nullid):
+    def setparents(p1, p2=None):
         """Set dirstate parents to p1 and p2.
 
         When moving from two parents to one, 'm' merged entries a
--- a/mercurial/localrepo.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/localrepo.py	Mon May 03 18:55:19 2021 +0200
@@ -19,7 +19,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     sha1nodeconstants,
     short,
@@ -1702,7 +1701,7 @@
                     _(b"warning: ignoring unknown working parent %s!\n")
                     % short(node)
                 )
-            return nullid
+            return self.nullid
 
     @storecache(narrowspec.FILENAME)
     def narrowpats(self):
@@ -1753,9 +1752,9 @@
     @unfilteredpropertycache
     def _quick_access_changeid_null(self):
         return {
-            b'null': (nullrev, nullid),
-            nullrev: (nullrev, nullid),
-            nullid: (nullrev, nullid),
+            b'null': (nullrev, self.nodeconstants.nullid),
+            nullrev: (nullrev, self.nodeconstants.nullid),
+            self.nullid: (nullrev, self.nullid),
         }
 
     @unfilteredpropertycache
@@ -1765,7 +1764,7 @@
         quick = self._quick_access_changeid_null.copy()
         cl = self.unfiltered().changelog
         for node in self.dirstate.parents():
-            if node == nullid:
+            if node == self.nullid:
                 continue
             rev = cl.index.get_rev(node)
             if rev is None:
@@ -1785,7 +1784,7 @@
                 quick[r] = pair
                 quick[n] = pair
         p1node = self.dirstate.p1()
-        if p1node != nullid:
+        if p1node != self.nullid:
             quick[b'.'] = quick[p1node]
         return quick
 
@@ -1841,7 +1840,7 @@
                 # when we know that '.' won't be hidden
                 node = self.dirstate.p1()
                 rev = self.unfiltered().changelog.rev(node)
-            elif len(changeid) == 20:
+            elif len(changeid) == self.nodeconstants.nodelen:
                 try:
                     node = changeid
                     rev = self.changelog.rev(changeid)
@@ -1862,7 +1861,7 @@
                     changeid = hex(changeid)  # for the error message
                     raise
 
-            elif len(changeid) == 40:
+            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                 node = bin(changeid)
                 rev = self.changelog.rev(node)
             else:
@@ -2037,7 +2036,7 @@
         # local encoding.
         tags = {}
         for (name, (node, hist)) in pycompat.iteritems(alltags):
-            if node != nullid:
+            if node != self.nullid:
                 tags[encoding.tolocal(name)] = node
         tags[b'tip'] = self.changelog.tip()
         tagtypes = {
@@ -2161,7 +2160,9 @@
     def wjoin(self, f, *insidef):
         return self.vfs.reljoin(self.root, f, *insidef)
 
-    def setparents(self, p1, p2=nullid):
+    def setparents(self, p1, p2=None):
+        if p2 is None:
+            p2 = self.nullid
         self[None].setparents(p1, p2)
         self._quick_access_changeid_invalidate()
 
@@ -3094,7 +3095,7 @@
                 subrepoutil.writestate(self, newstate)
 
             p1, p2 = self.dirstate.parents()
-            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
+            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
             try:
                 self.hook(
                     b"precommit", throw=True, parent1=hookp1, parent2=hookp2
@@ -3267,7 +3268,7 @@
             t = n
             while True:
                 p = self.changelog.parents(n)
-                if p[1] != nullid or p[0] == nullid:
+                if p[1] != self.nullid or p[0] == self.nullid:
                     b.append((t, n, p[0], p[1]))
                     break
                 n = p[0]
@@ -3280,7 +3281,7 @@
             n, l, i = top, [], 0
             f = 1
 
-            while n != bottom and n != nullid:
+            while n != bottom and n != self.nullid:
                 p = self.changelog.parents(n)[0]
                 if i == f:
                     l.append(n)
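
Throughout localrepo.py the hard-coded SHA-1 sizes (20 bytes binary, 40 characters hex) give way to `self.nodeconstants.nodelen`, and module-level `nullid` becomes `self.nullid`. A minimal sketch of the resulting pattern (illustrative helper, assuming a repository object `repo`):

    def is_full_hex_node(repo, symbol):
        # 40 characters for SHA-1, but derived from the repository so that a
        # different hash width works without touching call sites
        return len(symbol) == 2 * repo.nodeconstants.nodelen
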
--- a/mercurial/logcmdutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/logcmdutil.py	Mon May 03 18:55:19 2021 +0200
@@ -12,12 +12,7 @@
 import posixpath
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-    wdirid,
-    wdirrev,
-)
+from .node import nullrev, wdirrev
 
 from .thirdparty import attr
 
@@ -357,7 +352,7 @@
         if self.ui.debugflag:
             mnode = ctx.manifestnode()
             if mnode is None:
-                mnode = wdirid
+                mnode = self.repo.nodeconstants.wdirid
                 mrev = wdirrev
             else:
                 mrev = self.repo.manifestlog.rev(mnode)
@@ -505,7 +500,11 @@
         )
 
         if self.ui.debugflag or b'manifest' in datahint:
-            fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
+            fm.data(
+                manifest=fm.hexfunc(
+                    ctx.manifestnode() or self.repo.nodeconstants.wdirid
+                )
+            )
         if self.ui.debugflag or b'extra' in datahint:
             fm.data(extra=fm.formatdict(ctx.extra()))
 
@@ -991,7 +990,7 @@
     """Return the initial set of revisions to be filtered or followed"""
     if wopts.revspec:
         revs = scmutil.revrange(repo, wopts.revspec)
-    elif wopts.follow and repo.dirstate.p1() == nullid:
+    elif wopts.follow and repo.dirstate.p1() == repo.nullid:
         revs = smartset.baseset()
     elif wopts.follow:
         revs = repo.revs(b'.')
--- a/mercurial/manifest.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/manifest.py	Mon May 03 18:55:19 2021 +0200
@@ -16,7 +16,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from .pycompat import getattr
@@ -43,7 +42,7 @@
 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
 
 
-def _parse(data):
+def _parse(nodelen, data):
     # This method does a little bit of excessive-looking
     # precondition checking. This is so that the behavior of this
     # class exactly matches its C counterpart to try and help
@@ -64,7 +63,7 @@
             nl -= 1
         else:
             flags = b''
-        if nl not in (40, 64):
+        if nl != 2 * nodelen:
             raise ValueError(b'Invalid manifest line')
 
         yield f, bin(n), flags
@@ -132,7 +131,7 @@
         else:
             hlen = nlpos - zeropos - 1
             flags = b''
-        if hlen not in (40, 64):
+        if hlen != 2 * self.lm._nodelen:
             raise error.StorageError(b'Invalid manifest line')
         hashval = unhexlify(
             data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
@@ -177,12 +176,14 @@
 
     def __init__(
         self,
+        nodelen,
         data,
         positions=None,
         extrainfo=None,
         extradata=None,
         hasremovals=False,
     ):
+        self._nodelen = nodelen
         if positions is None:
             self.positions = self.findlines(data)
             self.extrainfo = [0] * len(self.positions)
@@ -289,7 +290,7 @@
             hlen -= 1
         else:
             flags = b''
-        if hlen not in (40, 64):
+        if hlen != 2 * self._nodelen:
             raise error.StorageError(b'Invalid manifest line')
         hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
         return (hashval, flags)
@@ -345,6 +346,7 @@
     def copy(self):
         # XXX call _compact like in C?
         return _lazymanifest(
+            self._nodelen,
             self.data,
             self.positions,
             self.extrainfo,
@@ -455,7 +457,7 @@
 
     def filtercopy(self, filterfn):
         # XXX should be optimized
-        c = _lazymanifest(b'')
+        c = _lazymanifest(self._nodelen, b'')
         for f, n, fl in self.iterentries():
             if filterfn(f):
                 c[f] = n, fl
@@ -470,8 +472,9 @@
 
 @interfaceutil.implementer(repository.imanifestdict)
 class manifestdict(object):
-    def __init__(self, data=b''):
-        self._lm = _lazymanifest(data)
+    def __init__(self, nodelen, data=b''):
+        self._nodelen = nodelen
+        self._lm = _lazymanifest(nodelen, data)
 
     def __getitem__(self, key):
         return self._lm[key][0]
@@ -579,14 +582,14 @@
             return self.copy()
 
         if self._filesfastpath(match):
-            m = manifestdict()
+            m = manifestdict(self._nodelen)
             lm = self._lm
             for fn in match.files():
                 if fn in lm:
                     m._lm[fn] = lm[fn]
             return m
 
-        m = manifestdict()
+        m = manifestdict(self._nodelen)
         m._lm = self._lm.filtercopy(match)
         return m
 
@@ -629,7 +632,7 @@
             return b''
 
     def copy(self):
-        c = manifestdict()
+        c = manifestdict(self._nodelen)
         c._lm = self._lm.copy()
         return c
 
@@ -795,7 +798,8 @@
     def __init__(self, nodeconstants, dir=b'', text=b''):
         self._dir = dir
         self.nodeconstants = nodeconstants
-        self._node = nullid
+        self._node = self.nodeconstants.nullid
+        self._nodelen = self.nodeconstants.nodelen
         self._loadfunc = _noop
         self._copyfunc = _noop
         self._dirty = False
@@ -1323,7 +1327,7 @@
 
     def parse(self, text, readsubtree):
         selflazy = self._lazydirs
-        for f, n, fl in _parse(text):
+        for f, n, fl in _parse(self._nodelen, text):
             if fl == b't':
                 f = f + b'/'
                 # False below means "doesn't need to be copied" and can use the
@@ -1391,7 +1395,7 @@
                 continue
             subp1 = getnode(m1, d)
             subp2 = getnode(m2, d)
-            if subp1 == nullid:
+            if subp1 == self.nodeconstants.nullid:
                 subp1, subp2 = subp2, subp1
             writesubtree(subm, subp1, subp2, match)
 
@@ -1994,7 +1998,7 @@
             else:
                 m = manifestctx(self, node)
 
-        if node != nullid:
+        if node != self.nodeconstants.nullid:
             mancache = self._dirmancache.get(tree)
             if not mancache:
                 mancache = util.lrucachedict(self._cachesize)
@@ -2020,7 +2024,7 @@
 class memmanifestctx(object):
     def __init__(self, manifestlog):
         self._manifestlog = manifestlog
-        self._manifestdict = manifestdict()
+        self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
 
     def _storage(self):
         return self._manifestlog.getstorage(b'')
@@ -2082,8 +2086,9 @@
 
     def read(self):
         if self._data is None:
-            if self._node == nullid:
-                self._data = manifestdict()
+            nc = self._manifestlog.nodeconstants
+            if self._node == nc.nullid:
+                self._data = manifestdict(nc.nodelen)
             else:
                 store = self._storage()
                 if self._node in store.fulltextcache:
@@ -2092,7 +2097,7 @@
                     text = store.revision(self._node)
                     arraytext = bytearray(text)
                     store.fulltextcache[self._node] = arraytext
-                self._data = manifestdict(text)
+                self._data = manifestdict(nc.nodelen, text)
         return self._data
 
     def readfast(self, shallow=False):
@@ -2119,7 +2124,7 @@
         store = self._storage()
         r = store.rev(self._node)
         d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
-        return manifestdict(d)
+        return manifestdict(store.nodeconstants.nodelen, d)
 
     def find(self, key):
         return self.read().find(key)
@@ -2188,7 +2193,7 @@
     def read(self):
         if self._data is None:
             store = self._storage()
-            if self._node == nullid:
+            if self._node == self._manifestlog.nodeconstants.nullid:
                 self._data = treemanifest(self._manifestlog.nodeconstants)
             # TODO accessing non-public API
             elif store._treeondisk:
@@ -2245,7 +2250,7 @@
         if shallow:
             r = store.rev(self._node)
             d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
-            return manifestdict(d)
+            return manifestdict(store.nodeconstants.nodelen, d)
         else:
             # Need to perform a slow delta
             r0 = store.deltaparent(store.rev(self._node))
@@ -2274,7 +2279,9 @@
             return self.readdelta(shallow=shallow)
 
         if shallow:
-            return manifestdict(store.revision(self._node))
+            return manifestdict(
+                store.nodeconstants.nodelen, store.revision(self._node)
+            )
         else:
             return self.read()
 
--- a/mercurial/merge.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/merge.py	Mon May 03 18:55:19 2021 +0200
@@ -13,12 +13,7 @@
 import struct
 
 from .i18n import _
-from .node import (
-    addednodeid,
-    modifiednodeid,
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from .thirdparty import attr
 from .utils import stringutil
 from . import (
@@ -779,7 +774,7 @@
         # to flag the change. If wctx is a committed revision, we shouldn't
         # care for the dirty state of the working directory.
         if any(wctx.sub(s).dirty() for s in wctx.substate):
-            m1[b'.hgsubstate'] = modifiednodeid
+            m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
 
     # Don't use m2-vs-ma optimization if:
     # - ma is the same as m1 or m2, which we're just going to diff again later
@@ -944,7 +939,7 @@
                             mresult.addcommitinfo(
                                 f, b'merge-removal-candidate', b'yes'
                             )
-                elif n1 == addednodeid:
+                elif n1 == repo.nodeconstants.addednodeid:
                     # This file was locally added. We should forget it instead of
                     # deleting it.
                     mresult.addfile(
@@ -1785,7 +1780,7 @@
     if (
         fsmonitorwarning
         and not fsmonitorenabled
-        and p1node == nullid
+        and p1node == repo.nullid
         and num_gets >= fsmonitorthreshold
         and pycompat.sysplatform.startswith((b'linux', b'darwin'))
     ):
@@ -1913,7 +1908,7 @@
         else:
             if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
                 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
-                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
+                pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
             else:
                 pas = [p1.ancestor(p2, warn=branchmerge)]
 
@@ -2112,7 +2107,7 @@
 
         ### apply phase
         if not branchmerge:  # just jump to the new rev
-            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
+            fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
         # If we're doing a partial update, we need to skip updating
         # the dirstate.
         always = matcher is None or matcher.always()
@@ -2281,14 +2276,14 @@
     if keepconflictparent and stats.unresolvedcount:
         pother = ctx.node()
     else:
-        pother = nullid
+        pother = repo.nullid
         parents = ctx.parents()
         if keepparent and len(parents) == 2 and base in parents:
             parents.remove(base)
             pother = parents[0].node()
     # Never set both parents equal to each other
     if pother == pctx.node():
-        pother = nullid
+        pother = repo.nullid
 
     if wctx.isinmemory():
         wctx.setparents(pctx.node(), pother)
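
The sentinel ids used during merges (`addednodeid`, `modifiednodeid`) are no longer module-level constants from `mercurial.node`; they hang off the repository's `nodeconstants`, so they scale with the hash length. A minimal sketch of testing a manifest value against them (illustrative helper, not part of the patch):

    def classify_sentinel(repo, node):
        nc = repo.nodeconstants
        if node == nc.addednodeid:
            return 'locally added'
        if node == nc.modifiednodeid:
            return 'locally modified'
        if node == nc.nullid:
            return 'null'
        return 'regular node'
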
--- a/mercurial/mergestate.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/mergestate.py	Mon May 03 18:55:19 2021 +0200
@@ -9,7 +9,6 @@
 from .node import (
     bin,
     hex,
-    nullhex,
     nullrev,
 )
 from . import (
@@ -32,7 +31,7 @@
 
 
 def _filectxorabsent(hexnode, ctx, f):
-    if hexnode == nullhex:
+    if hexnode == ctx.repo().nodeconstants.nullhex:
         return filemerge.absentfilectx(ctx, f)
     else:
         return ctx[f]
@@ -248,7 +247,7 @@
         note: also write the local version to the `.hg/merge` directory.
         """
         if fcl.isabsent():
-            localkey = nullhex
+            localkey = self._repo.nodeconstants.nullhex
         else:
             localkey = mergestate.getlocalkey(fcl.path())
             self._make_backup(fcl, localkey)
@@ -354,7 +353,7 @@
                 flags = flo
         if preresolve:
             # restore local
-            if localkey != nullhex:
+            if localkey != self._repo.nodeconstants.nullhex:
                 self._restore_backup(wctx[dfile], localkey, flags)
             else:
                 wctx[dfile].remove(ignoremissing=True)
@@ -658,7 +657,10 @@
                 records.append(
                     (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                 )
-            elif v[1] == nullhex or v[6] == nullhex:
+            elif (
+                v[1] == self._repo.nodeconstants.nullhex
+                or v[6] == self._repo.nodeconstants.nullhex
+            ):
                 # Change/Delete or Delete/Change conflicts. These are stored in
                 # 'C' records. v[1] is the local file, and is nullhex when the
                 # file is deleted locally ('dc'). v[6] is the remote file, and
--- a/mercurial/metadata.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/metadata.py	Mon May 03 18:55:19 2021 +0200
@@ -11,10 +11,7 @@
 import multiprocessing
 import struct
 
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from . import (
     error,
     pycompat,
@@ -617,7 +614,7 @@
         if f in ctx:
             fctx = ctx[f]
             parents = fctx._filelog.parents(fctx._filenode)
-            if parents[1] != nullid:
+            if parents[1] != ctx.repo().nullid:
                 merged.append(f)
     return merged
 
--- a/mercurial/obsolete.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/obsolete.py	Mon May 03 18:55:19 2021 +0200
@@ -73,11 +73,14 @@
 import struct
 
 from .i18n import _
+from .node import (
+    bin,
+    hex,
+)
 from .pycompat import getattr
 from .node import (
     bin,
     hex,
-    nullid,
 )
 from . import (
     encoding,
@@ -103,6 +106,7 @@
 # Options for obsolescence
 createmarkersopt = b'createmarkers'
 allowunstableopt = b'allowunstable'
+allowdivergenceopt = b'allowdivergence'
 exchangeopt = b'exchange'
 
 
@@ -141,10 +145,13 @@
 
     createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
     unstablevalue = _getoptionvalue(repo, allowunstableopt)
+    divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
     exchangevalue = _getoptionvalue(repo, exchangeopt)
 
     # createmarkers must be enabled if other options are enabled
-    if (unstablevalue or exchangevalue) and not createmarkersvalue:
+    if (
+        unstablevalue or divergencevalue or exchangevalue
+    ) and not createmarkersvalue:
         raise error.Abort(
             _(
                 b"'createmarkers' obsolete option must be enabled "
@@ -155,6 +162,7 @@
     return {
         createmarkersopt: createmarkersvalue,
         allowunstableopt: unstablevalue,
+        allowdivergenceopt: divergencevalue,
         exchangeopt: exchangevalue,
     }
 
@@ -526,14 +534,14 @@
                 children.setdefault(p, set()).add(mark)
 
 
-def _checkinvalidmarkers(markers):
+def _checkinvalidmarkers(repo, markers):
     """search for marker with invalid data and raise error if needed
 
     Exist as a separated function to allow the evolve extension for a more
     subtle handling.
     """
     for mark in markers:
-        if nullid in mark[1]:
+        if repo.nullid in mark[1]:
             raise error.Abort(
                 _(
                     b'bad obsolescence marker detected: '
@@ -727,7 +735,7 @@
             return []
         self._version, markers = _readmarkers(data)
         markers = list(markers)
-        _checkinvalidmarkers(markers)
+        _checkinvalidmarkers(self.repo, markers)
         return markers
 
     @propertycache
@@ -761,7 +769,7 @@
             _addpredecessors(self.predecessors, markers)
         if self._cached('children'):
             _addchildren(self.children, markers)
-        _checkinvalidmarkers(markers)
+        _checkinvalidmarkers(self.repo, markers)
 
     def relevantmarkers(self, nodes):
         """return a set of all obsolescence markers relevant to a set of nodes.
--- a/mercurial/patch.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/patch.py	Mon May 03 18:55:19 2021 +0200
@@ -20,7 +20,7 @@
 from .i18n import _
 from .node import (
     hex,
-    nullhex,
+    sha1nodeconstants,
     short,
 )
 from .pycompat import open
@@ -3100,8 +3100,8 @@
 
     ctx1, fctx1, path1, flag1, content1, date1 = data1
     ctx2, fctx2, path2, flag2, content2, date2 = data2
-    index1 = _gitindex(content1) if path1 in ctx1 else nullhex
-    index2 = _gitindex(content2) if path2 in ctx2 else nullhex
+    index1 = _gitindex(content1) if path1 in ctx1 else sha1nodeconstants.nullhex
+    index2 = _gitindex(content2) if path2 in ctx2 else sha1nodeconstants.nullhex
     if binary and opts.git and not opts.nobinary:
         text = mdiff.b85diff(content1, content2)
         if text:
--- a/mercurial/phases.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/phases.py	Mon May 03 18:55:19 2021 +0200
@@ -109,7 +109,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
     wdirrev,
@@ -862,7 +861,7 @@
         node = bin(nhex)
         phase = int(phase)
         if phase == public:
-            if node != nullid:
+            if node != repo.nullid:
                 repo.ui.warn(
                     _(
                         b'ignoring inconsistent public root'
@@ -919,10 +918,10 @@
     rev = cl.index.get_rev
     if not roots:
         return heads
-    if not heads or heads == [nullid]:
+    if not heads or heads == [repo.nullid]:
         return []
     # The logic operated on revisions, convert arguments early for convenience
-    new_heads = {rev(n) for n in heads if n != nullid}
+    new_heads = {rev(n) for n in heads if n != repo.nullid}
     roots = [rev(n) for n in roots]
     # compute the area we need to remove
     affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
--- a/mercurial/policy.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/policy.py	Mon May 03 18:55:19 2021 +0200
@@ -80,7 +80,7 @@
     ('cext', 'bdiff'): 3,
     ('cext', 'mpatch'): 1,
     ('cext', 'osutil'): 4,
-    ('cext', 'parsers'): 17,
+    ('cext', 'parsers'): 18,
 }
 
 # map import request to other package or module
--- a/mercurial/pure/parsers.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/pure/parsers.py	Mon May 03 18:55:19 2021 +0200
@@ -10,7 +10,10 @@
 import struct
 import zlib
 
-from ..node import nullid, nullrev
+from ..node import (
+    nullrev,
+    sha1nodeconstants,
+)
 from .. import (
     pycompat,
     util,
@@ -50,7 +53,7 @@
     # Size of a C long int, platform independent
     int_size = struct.calcsize(b'>i')
     # An empty index entry, used as a default value to be overridden, or nullrev
-    null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+    null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
 
     @util.propertycache
     def entry_size(self):
@@ -64,7 +67,7 @@
 
     @util.propertycache
     def _nodemap(self):
-        nodemap = nodemaputil.NodeMap({nullid: nullrev})
+        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
         for r in range(0, len(self)):
             n = self[r][7]
             nodemap[n] = r
@@ -124,10 +127,27 @@
             r = (offset_type(0, gettype(r[0])),) + r[1:]
         return r
 
+    def pack_header(self, header):
+        """pack header information as binary"""
+        v_fmt = revlog_constants.INDEX_HEADER
+        return v_fmt.pack(header)
+
+    def entry_binary(self, rev):
+        """return the raw binary string representing a revision"""
+        entry = self[rev]
+        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry)
+        if rev == 0:
+            p = p[revlog_constants.INDEX_HEADER.size :]
+        return p
+
 
 class IndexObject(BaseIndexObject):
     def __init__(self, data):
-        assert len(data) % self.entry_size == 0
+        assert len(data) % self.entry_size == 0, (
+            len(data),
+            self.entry_size,
+            len(data) % self.entry_size,
+        )
         self._data = data
         self._lgt = len(data) // self.entry_size
         self._extra = []
@@ -246,7 +266,7 @@
 
 class Index2Mixin(object):
     index_format = revlog_constants.INDEX_ENTRY_V2
-    null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
+    null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)
 
     def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
         """
@@ -269,6 +289,14 @@
             msg = b"cannot rewrite entries outside of this transaction"
             raise KeyError(msg)
 
+    def entry_binary(self, rev):
+        """return the raw binary string representing a revision"""
+        entry = self[rev]
+        p = revlog_constants.INDEX_ENTRY_V2.pack(*entry)
+        if rev == 0:
+            p = p[revlog_constants.INDEX_HEADER.size :]
+        return p
+
 
 class IndexObject2(Index2Mixin, IndexObject):
     pass
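
Serialisation moves from the old `revlogio.packentry` helpers onto the index objects themselves: `entry_binary(rev)` returns the packed entry and `pack_header(header)` the version header that revision 0 carries inline. A minimal sketch of rebuilding index bytes from an in-memory index, assuming an index object `index` and an integer `version` as the revlog.py hunks below use them:

    def serialize_index(index, version):
        # revision 0's entry has the header spliced over its first bytes,
        # so pack_header() is prepended only for rev 0
        chunks = []
        for rev in range(len(index)):
            e = index.entry_binary(rev)
            if rev == 0:
                e = index.pack_header(version) + e
            chunks.append(e)
        return b''.join(chunks)
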
--- a/mercurial/revlog.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/revlog.py	Mon May 03 18:55:19 2021 +0200
@@ -26,14 +26,9 @@
 from .node import (
     bin,
     hex,
-    nullhex,
-    nullid,
     nullrev,
     sha1nodeconstants,
     short,
-    wdirfilenodeids,
-    wdirhex,
-    wdirid,
     wdirrev,
 )
 from .i18n import _
@@ -41,9 +36,6 @@
 from .revlogutils.constants import (
     FLAG_GENERALDELTA,
     FLAG_INLINE_DATA,
-    INDEX_ENTRY_V0,
-    INDEX_ENTRY_V1,
-    INDEX_ENTRY_V2,
     INDEX_HEADER,
     REVLOGV0,
     REVLOGV1,
@@ -83,6 +75,7 @@
     deltas as deltautil,
     flagutil,
     nodemap as nodemaputil,
+    revlogv0,
     sidedata as sidedatautil,
 )
 from .utils import (
@@ -143,14 +136,6 @@
 )
 
 
-def getoffset(q):
-    return int(q >> 16)
-
-
-def gettype(q):
-    return int(q & 0xFFFF)
-
-
 def offset_type(offset, type):
     if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
         raise ValueError(b'unknown revlog index flags')
@@ -221,110 +206,32 @@
     node = attr.ib(default=None)
 
 
-class revlogoldindex(list):
-    entry_size = INDEX_ENTRY_V0.size
-
-    @property
-    def nodemap(self):
-        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
-        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
-        return self._nodemap
-
-    @util.propertycache
-    def _nodemap(self):
-        nodemap = nodemaputil.NodeMap({nullid: nullrev})
-        for r in range(0, len(self)):
-            n = self[r][7]
-            nodemap[n] = r
-        return nodemap
-
-    def has_node(self, node):
-        """return True if the node exist in the index"""
-        return node in self._nodemap
-
-    def rev(self, node):
-        """return a revision for a node
-
-        If the node is unknown, raise a RevlogError"""
-        return self._nodemap[node]
-
-    def get_rev(self, node):
-        """return a revision for a node
-
-        If the node is unknown, return None"""
-        return self._nodemap.get(node)
-
-    def append(self, tup):
-        self._nodemap[tup[7]] = len(self)
-        super(revlogoldindex, self).append(tup)
-
-    def __delitem__(self, i):
-        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
-            raise ValueError(b"deleting slices only supports a:-1 with step 1")
-        for r in pycompat.xrange(i.start, len(self)):
-            del self._nodemap[self[r][7]]
-        super(revlogoldindex, self).__delitem__(i)
-
-    def clearcaches(self):
-        self.__dict__.pop('_nodemap', None)
-
-    def __getitem__(self, i):
-        if i == -1:
-            return (0, 0, 0, -1, -1, -1, -1, nullid)
-        return list.__getitem__(self, i)
-
-
-class revlogoldio(object):
-    def parseindex(self, data, inline):
-        s = INDEX_ENTRY_V0.size
-        index = []
-        nodemap = nodemaputil.NodeMap({nullid: nullrev})
-        n = off = 0
-        l = len(data)
-        while off + s <= l:
-            cur = data[off : off + s]
-            off += s
-            e = INDEX_ENTRY_V0.unpack(cur)
-            # transform to revlogv1 format
-            e2 = (
-                offset_type(e[0], 0),
-                e[1],
-                -1,
-                e[2],
-                e[3],
-                nodemap.get(e[4], nullrev),
-                nodemap.get(e[5], nullrev),
-                e[6],
-            )
-            index.append(e2)
-            nodemap[e[6]] = n
-            n += 1
-
-        index = revlogoldindex(index)
-        return index, None
-
-    def packentry(self, entry, node, version, rev):
-        """return the binary representation of an entry
-
-        entry:   a tuple containing all the values (see index.__getitem__)
-        node:    a callback to convert a revision to nodeid
-        version: the changelog version
-        rev:     the revision number
-        """
-        if gettype(entry[0]):
-            raise error.RevlogError(
-                _(b'index entry flags need revlog version 1')
-            )
-        e2 = (
-            getoffset(entry[0]),
-            entry[1],
-            entry[3],
-            entry[4],
-            node(entry[5]),
-            node(entry[6]),
-            entry[7],
-        )
-        return INDEX_ENTRY_V0.pack(*e2)
+def parse_index_v1(data, inline):
+    # call the C implementation to parse the index data
+    index, cache = parsers.parse_index2(data, inline)
+    return index, cache
+
+
+def parse_index_v2(data, inline):
+    # call the C implementation to parse the index data
+    index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+    return index, cache
+
+
+if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+
+    def parse_index_v1_nodemap(data, inline):
+        index, cache = parsers.parse_index_devel_nodemap(data, inline)
+        return index, cache
+
+
+else:
+    parse_index_v1_nodemap = None
+
+
+def parse_index_v1_mixed(data, inline):
+    index, cache = parse_index_v1(data, inline)
+    return rustrevlog.MixedIndex(index), cache
 
 
 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
@@ -332,52 +239,6 @@
 _maxentrysize = 0x7FFFFFFF
 
 
-class revlogio(object):
-    def parseindex(self, data, inline):
-        # call the C implementation to parse the index data
-        index, cache = parsers.parse_index2(data, inline)
-        return index, cache
-
-    def packentry(self, entry, node, version, rev):
-        p = INDEX_ENTRY_V1.pack(*entry)
-        if rev == 0:
-            p = INDEX_HEADER.pack(version) + p[4:]
-        return p
-
-
-class revlogv2io(object):
-    def parseindex(self, data, inline):
-        index, cache = parsers.parse_index2(data, inline, revlogv2=True)
-        return index, cache
-
-    def packentry(self, entry, node, version, rev):
-        p = INDEX_ENTRY_V2.pack(*entry)
-        if rev == 0:
-            p = INDEX_HEADER.pack(version) + p[4:]
-        return p
-
-
-NodemapRevlogIO = None
-
-if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
-
-    class NodemapRevlogIO(revlogio):
-        """A debug oriented IO class that return a PersistentNodeMapIndexObject
-
-        The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
-        """
-
-        def parseindex(self, data, inline):
-            index, cache = parsers.parse_index_devel_nodemap(data, inline)
-            return index, cache
-
-
-class rustrevlogio(revlogio):
-    def parseindex(self, data, inline):
-        index, cache = super(rustrevlogio, self).parseindex(data, inline)
-        return rustrevlog.MixedIndex(index), cache
-
-
 class revlog(object):
     """
     the underlying revision storage object
@@ -636,7 +497,7 @@
         devel_nodemap = (
             self.nodemap_file
             and opts.get(b'devel-force-nodemap', False)
-            and NodemapRevlogIO is not None
+            and parse_index_v1_nodemap is not None
         )
 
         use_rust_index = False
@@ -646,17 +507,17 @@
             else:
                 use_rust_index = self.opener.options.get(b'rust.index')
 
-        self._io = revlogio()
+        self._parse_index = parse_index_v1
         if self.version == REVLOGV0:
-            self._io = revlogoldio()
+            self._parse_index = revlogv0.parse_index_v0
         elif fmt == REVLOGV2:
-            self._io = revlogv2io()
+            self._parse_index = parse_index_v2
         elif devel_nodemap:
-            self._io = NodemapRevlogIO()
+            self._parse_index = parse_index_v1_nodemap
         elif use_rust_index:
-            self._io = rustrevlogio()
+            self._parse_index = parse_index_v1_mixed
         try:
-            d = self._io.parseindex(indexdata, self._inline)
+            d = self._parse_index(indexdata, self._inline)
             index, _chunkcache = d
             use_nodemap = (
                 not self._inline
@@ -818,7 +679,10 @@
             raise
         except error.RevlogError:
             # parsers.c radix tree lookup failed
-            if node == wdirid or node in wdirfilenodeids:
+            if (
+                node == self.nodeconstants.wdirid
+                or node in self.nodeconstants.wdirfilenodeids
+            ):
                 raise error.WdirUnsupported
             raise error.LookupError(node, self.indexfile, _(b'no node'))
 
@@ -909,7 +773,7 @@
         i = self.index
         d = i[self.rev(node)]
         # inline node() to avoid function call overhead
-        if d[5] == nullid:
+        if d[5] == self.nullid:
             return i[d[6]][7], i[d[5]][7]
         else:
             return i[d[5]][7], i[d[6]][7]
@@ -1027,7 +891,7 @@
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
         if common is None:
-            common = [nullid]
+            common = [self.nullid]
         if heads is None:
             heads = self.heads()
 
@@ -1133,7 +997,7 @@
         not supplied, uses all of the revlog's heads.  If common is not
         supplied, uses nullid."""
         if common is None:
-            common = [nullid]
+            common = [self.nullid]
         if heads is None:
             heads = self.heads()
 
@@ -1171,11 +1035,15 @@
                 return nonodes
             lowestrev = min([self.rev(n) for n in roots])
         else:
-            roots = [nullid]  # Everybody's a descendant of nullid
+            roots = [self.nullid]  # Everybody's a descendant of nullid
             lowestrev = nullrev
         if (lowestrev == nullrev) and (heads is None):
             # We want _all_ the nodes!
-            return ([self.node(r) for r in self], [nullid], list(self.heads()))
+            return (
+                [self.node(r) for r in self],
+                [self.nullid],
+                list(self.heads()),
+            )
         if heads is None:
             # All nodes are ancestors, so the latest ancestor is the last
             # node.
@@ -1201,7 +1069,7 @@
                 # grab a node to tag
                 n = nodestotag.pop()
                 # Never tag nullid
-                if n == nullid:
+                if n == self.nullid:
                     continue
                 # A node's revision number represents its place in a
                 # topologically sorted list of nodes.
@@ -1213,7 +1081,7 @@
                         ancestors.add(n)  # Mark as ancestor
                         # Add non-nullid parents to list of nodes to tag.
                         nodestotag.update(
-                            [p for p in self.parents(n) if p != nullid]
+                            [p for p in self.parents(n) if p != self.nullid]
                         )
                     elif n in heads:  # We've seen it before, is it a fake head?
                         # So it is, real heads should not be the ancestors of
@@ -1241,7 +1109,7 @@
                 # We are descending from nullid, and don't need to care about
                 # any other roots.
                 lowestrev = nullrev
-                roots = [nullid]
+                roots = [self.nullid]
         # Transform our roots list into a set.
         descendants = set(roots)
         # Also, keep the original roots so we can filter out roots that aren't
@@ -1335,7 +1203,7 @@
         """
         if start is None and stop is None:
             if not len(self):
-                return [nullid]
+                return [self.nullid]
             return [self.node(r) for r in self.headrevs()]
 
         if start is None:
@@ -1425,13 +1293,13 @@
         if ancs:
             # choose a consistent winner when there's a tie
             return min(map(self.node, ancs))
-        return nullid
+        return self.nullid
 
     def _match(self, id):
         if isinstance(id, int):
             # rev
             return self.node(id)
-        if len(id) == 20:
+        if len(id) == self.nodeconstants.nodelen:
             # possibly a binary node
             # odds of a binary node being all hex in ASCII are 1 in 10**25
             try:
@@ -1452,7 +1320,7 @@
             return self.node(rev)
         except (ValueError, OverflowError):
             pass
-        if len(id) == 40:
+        if len(id) == 2 * self.nodeconstants.nodelen:
             try:
                 # a full hex nodeid?
                 node = bin(id)
@@ -1463,7 +1331,7 @@
 
     def _partialmatch(self, id):
         # we don't care wdirfilenodeids as they should be always full hash
-        maybewdir = wdirhex.startswith(id)
+        maybewdir = self.nodeconstants.wdirhex.startswith(id)
         try:
             partial = self.index.partialmatch(id)
             if partial and self.hasnode(partial):
@@ -1499,8 +1367,8 @@
                 nl = [
                     n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                 ]
-                if nullhex.startswith(id):
-                    nl.append(nullid)
+                if self.nodeconstants.nullhex.startswith(id):
+                    nl.append(self.nullid)
                 if len(nl) > 0:
                     if len(nl) == 1 and not maybewdir:
                         self._pcache[id] = nl[0]
@@ -1560,13 +1428,13 @@
                 length = max(self.index.shortest(node), minlength)
                 return disambiguate(hexnode, length)
             except error.RevlogError:
-                if node != wdirid:
+                if node != self.nodeconstants.wdirid:
                     raise error.LookupError(node, self.indexfile, _(b'no node'))
             except AttributeError:
                 # Fall through to pure code
                 pass
 
-        if node == wdirid:
+        if node == self.nodeconstants.wdirid:
             for length in range(minlength, len(hexnode) + 1):
                 prefix = hexnode[:length]
                 if isvalid(prefix):
@@ -1881,7 +1749,7 @@
             rev = None
 
         # fast path the special `nullid` rev
-        if node == nullid:
+        if node == self.nullid:
             return b"", {}
 
         # ``rawtext`` is the text as stored inside the revlog. Might be the
@@ -2064,9 +1932,11 @@
         with self._indexfp(b'w') as fp:
             self.version &= ~FLAG_INLINE_DATA
             self._inline = False
-            io = self._io
             for i in self:
-                e = io.packentry(self.index[i], self.node, self.version, i)
+                e = self.index.entry_binary(i)
+                if i == 0:
+                    header = self.index.pack_header(self.version)
+                    e = header + e
                 fp.write(e)
 
             # the temp file replace the real index when we exit the context
@@ -2302,11 +2172,14 @@
         - rawtext is optional (can be None); if not set, cachedelta must be set.
           if both are set, they must correspond to each other.
         """
-        if node == nullid:
+        if node == self.nullid:
             raise error.RevlogError(
                 _(b"%s: attempt to add null revision") % self.indexfile
             )
-        if node == wdirid or node in wdirfilenodeids:
+        if (
+            node == self.nodeconstants.wdirid
+            or node in self.nodeconstants.wdirfilenodeids
+        ):
             raise error.RevlogError(
                 _(b"%s: attempt to add wdir revision") % self.indexfile
             )
@@ -2385,7 +2258,10 @@
             e = e[:8]
 
         self.index.append(e)
-        entry = self._io.packentry(e, self.node, self.version, curr)
+        entry = self.index.entry_binary(curr)
+        if curr == 0:
+            header = self.index.pack_header(self.version)
+            entry = header + entry
         self._writeentry(
             transaction,
             ifh,
@@ -2996,7 +2872,7 @@
         newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
         newrl.version = self.version
         newrl._generaldelta = self._generaldelta
-        newrl._io = self._io
+        newrl._parse_index = self._parse_index
 
         for rev in self.revs():
             node = self.node(rev)
@@ -3238,5 +3114,8 @@
             for i, entry in enumerate(new_entries):
                 rev = startrev + i
                 self.index.replace_sidedata_info(rev, entry[8], entry[9])
-                packed = self._io.packentry(entry, self.node, self.version, rev)
+                packed = self.index.entry_binary(rev)
+                if rev == 0:
+                    header = self.index.pack_header(self.version)
+                    packed = header + packed
                 fp.write(packed)
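
With the `revlogio`/`revlogv2io`/`rustrevlogio` classes gone, index parsing is selected by binding one of the module-level `parse_index_v*` functions to `self._parse_index`. A minimal sketch of parsing raw index data outside the revlog class, assuming `data` holds the index bytes and `inline` its inline-data flag:

    from mercurial import revlog

    def load_index(data, inline):
        # parse_index_v1 wraps the C parser and returns (index, cache)
        index, cache = revlog.parse_index_v1(data, inline)
        return index
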
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/revlogv0.py	Mon May 03 18:55:19 2021 +0200
@@ -0,0 +1,144 @@
+# revlogv0 - code related to revlog format "V0"
+#
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+
+from ..node import sha1nodeconstants
+from .constants import (
+    INDEX_ENTRY_V0,
+)
+from ..i18n import _
+
+from .. import (
+    error,
+    node,
+    pycompat,
+    util,
+)
+
+from . import (
+    flagutil,
+    nodemap as nodemaputil,
+)
+
+
+def getoffset(q):
+    return int(q >> 16)
+
+
+def gettype(q):
+    return int(q & 0xFFFF)
+
+
+def offset_type(offset, type):
+    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
+        raise ValueError(b'unknown revlog index flags')
+    return int(int(offset) << 16 | type)
+
+
+class revlogoldindex(list):
+    entry_size = INDEX_ENTRY_V0.size
+
+    @property
+    def nodemap(self):
+        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
+        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+        return self._nodemap
+
+    @util.propertycache
+    def _nodemap(self):
+        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
+        for r in range(0, len(self)):
+            n = self[r][7]
+            nodemap[n] = r
+        return nodemap
+
+    def has_node(self, node):
+        """return True if the node exist in the index"""
+        return node in self._nodemap
+
+    def rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, raise a RevlogError"""
+        return self._nodemap[node]
+
+    def get_rev(self, node):
+        """return a revision for a node
+
+        If the node is unknown, return None"""
+        return self._nodemap.get(node)
+
+    def append(self, tup):
+        self._nodemap[tup[7]] = len(self)
+        super(revlogoldindex, self).append(tup)
+
+    def __delitem__(self, i):
+        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
+            raise ValueError(b"deleting slices only supports a:-1 with step 1")
+        for r in pycompat.xrange(i.start, len(self)):
+            del self._nodemap[self[r][7]]
+        super(revlogoldindex, self).__delitem__(i)
+
+    def clearcaches(self):
+        self.__dict__.pop('_nodemap', None)
+
+    def __getitem__(self, i):
+        if i == -1:
+            return (0, 0, 0, -1, -1, -1, -1, node.nullid)
+        return list.__getitem__(self, i)
+
+    def pack_header(self, header):
+        """pack header information in binary"""
+        return b''
+
+    def entry_binary(self, rev):
+        """return the raw binary string representing a revision"""
+        entry = self[rev]
+        if gettype(entry[0]):
+            raise error.RevlogError(
+                _(b'index entry flags need revlog version 1')
+            )
+        e2 = (
+            getoffset(entry[0]),
+            entry[1],
+            entry[3],
+            entry[4],
+            self[entry[5]][7],
+            self[entry[6]][7],
+            entry[7],
+        )
+        return INDEX_ENTRY_V0.pack(*e2)
+
+
+def parse_index_v0(data, inline):
+    s = INDEX_ENTRY_V0.size
+    index = []
+    nodemap = nodemaputil.NodeMap({node.nullid: node.nullrev})
+    n = off = 0
+    l = len(data)
+    while off + s <= l:
+        cur = data[off : off + s]
+        off += s
+        e = INDEX_ENTRY_V0.unpack(cur)
+        # transform to revlogv1 format
+        e2 = (
+            offset_type(e[0], 0),
+            e[1],
+            -1,
+            e[2],
+            e[3],
+            nodemap.get(e[4], node.nullrev),
+            nodemap.get(e[5], node.nullrev),
+            e[6],
+        )
+        index.append(e2)
+        nodemap[e[6]] = n
+        n += 1
+
+    index = revlogoldindex(index)
+    return index, None
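
The V0 parsing and packing logic now lives in its own module, mercurial/revlogutils/revlogv0.py; the revlog core only calls `revlogv0.parse_index_v0` when it sees a REVLOGV0 header (see the `_parse_index` selection above). A minimal usage sketch, assuming `data` contains a version-0 index:

    from mercurial.revlogutils import revlogv0

    def load_v0_index(data):
        # returns (revlogoldindex, None); entries are already normalised
        # to the revlogv1 tuple layout
        return revlogv0.parse_index_v0(data, inline=False)
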
--- a/mercurial/revset.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/revset.py	Mon May 03 18:55:19 2021 +0200
@@ -1724,7 +1724,7 @@
 def _node(repo, n):
     """process a node input"""
     rn = None
-    if len(n) == 40:
+    if len(n) == 2 * repo.nodeconstants.nodelen:
         try:
             rn = repo.changelog.rev(bin(n))
         except error.WdirUnsupported:
--- a/mercurial/rewriteutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/rewriteutil.py	Mon May 03 18:55:19 2021 +0200
@@ -21,6 +21,7 @@
     obsutil,
     revset,
     scmutil,
+    util,
 )
 
 
@@ -34,12 +35,18 @@
     Make sure this function is called after taking the lock.
     """
     if nullrev in revs:
-        msg = _(b"cannot %s null changeset") % action
+        msg = _(b"cannot %s the null revision") % action
         hint = _(b"no changeset checked out")
         raise error.InputError(msg, hint=hint)
 
+    if any(util.safehasattr(r, 'rev') for r in revs):
+        repo.ui.develwarn(b"rewriteutil.precheck called with ctx not revs")
+        revs = (r.rev() for r in revs)
+
     if len(repo[None].parents()) > 1:
-        raise error.StateError(_(b"cannot %s while merging") % action)
+        raise error.StateError(
+            _(b"cannot %s changesets while merging") % action
+        )
 
     publicrevs = repo.revs(b'%ld and public()', revs)
     if publicrevs:
@@ -49,7 +56,42 @@
 
     newunstable = disallowednewunstable(repo, revs)
     if newunstable:
-        raise error.InputError(_(b"cannot %s changeset with children") % action)
+        hint = _(b"see 'hg help evolution.instability'")
+        raise error.InputError(
+            _(b"cannot %s changeset with children") % action, hint=hint
+        )
+
+    if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
+        new_divergence = _find_new_divergence(repo, revs)
+        if new_divergence:
+            local_ctx, other_ctx, base_ctx = new_divergence
+            msg = _(
+                b'cannot %s %s, as that creates content-divergence with %s'
+            ) % (
+                action,
+                local_ctx,
+                other_ctx,
+            )
+            if local_ctx.rev() != base_ctx.rev():
+                msg += _(b', from %s') % base_ctx
+            if repo.ui.verbose:
+                if local_ctx.rev() != base_ctx.rev():
+                    msg += _(
+                        b'\n    changeset %s is a successor of ' b'changeset %s'
+                    ) % (local_ctx, base_ctx)
+                msg += _(
+                    b'\n    changeset %s already has a successor in '
+                    b'changeset %s\n'
+                    b'    rewriting changeset %s would create '
+                    b'"content-divergence"\n'
+                    b'    set experimental.evolution.allowdivergence=True to '
+                    b'skip this check'
+                ) % (base_ctx, other_ctx, local_ctx)
+                raise error.InputError(msg)
+            else:
+                raise error.InputError(
+                    msg, hint=_(b"add --verbose for details")
+                )
 
 
 def disallowednewunstable(repo, revs):
@@ -65,6 +107,40 @@
     return repo.revs(b"(%ld::) - %ld", revs, revs)
 
 
+def _find_new_divergence(repo, revs):
+    obsrevs = repo.revs(b'%ld and obsolete()', revs)
+    for r in obsrevs:
+        div = find_new_divergence_from(repo, repo[r])
+        if div:
+            return (repo[r], repo[div[0]], repo[div[1]])
+    return None
+
+
+def find_new_divergence_from(repo, ctx):
+    """return divergent revision if rewriting an obsolete cset (ctx) will
+    create divergence
+
+    Returns (<other node>, <common ancestor node>) or None
+    """
+    if not ctx.obsolete():
+        return None
+    # We need to check two cases that can cause divergence:
+    # case 1: the rev being rewritten has a non-obsolete successor (easily
+    #     detected by successorssets)
+    sset = obsutil.successorssets(repo, ctx.node())
+    if sset:
+        return (sset[0][0], ctx.node())
+    else:
+        # case 2: one of the precursors of the rev being revived has a
+        #     non-obsolete successor (we need divergentsets for this)
+        divsets = obsutil.divergentsets(repo, ctx)
+        if divsets:
+            nsuccset = divsets[0][b'divergentnodes']
+            prec = divsets[0][b'commonpredecessor']
+            return (nsuccset[0], prec)
+        return None
+
+
 def skip_empty_successor(ui, command):
     empty_successor = ui.config(b'rewrite', b'empty-successor')
     if empty_successor == b'skip':
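
`precheck` now warns via `develwarn` when handed contexts instead of revision numbers, and refuses rewrites that would create content-divergence unless `allowdivergence` is set. A minimal sketch of the intended calling convention, passing revisions rather than contexts; the helper name and the `action` value are illustrative:

    from mercurial import rewriteutil

    def check_rewrite(repo, ctxs, action=b'amend'):
        # precheck expects revision numbers; handing it contexts now only
        # emits a develwarn before it converts them internally
        revs = [c.rev() for c in ctxs]
        rewriteutil.precheck(repo, revs, action=action)
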
--- a/mercurial/scmutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/scmutil.py	Mon May 03 18:55:19 2021 +0200
@@ -19,10 +19,8 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
-    wdirid,
     wdirrev,
 )
 from .pycompat import getattr
@@ -450,7 +448,7 @@
     """Return binary node id for a given basectx"""
     node = ctx.node()
     if node is None:
-        return wdirid
+        return ctx.repo().nodeconstants.wdirid
     return node
 
 
@@ -645,7 +643,7 @@
         except (ValueError, OverflowError, IndexError):
             pass
 
-        if len(symbol) == 40:
+        if len(symbol) == 2 * repo.nodeconstants.nodelen:
             try:
                 node = bin(symbol)
                 rev = repo.changelog.rev(node)
@@ -1108,7 +1106,7 @@
                     if roots:
                         newnode = roots[0].node()
                     else:
-                        newnode = nullid
+                        newnode = repo.nullid
                 else:
                     newnode = newnodes[0]
                 moves[oldnode] = newnode
@@ -1506,7 +1504,7 @@
     oldctx = repo[b'.']
     ds = repo.dirstate
     copies = dict(ds.copies())
-    ds.setparents(newctx.node(), nullid)
+    ds.setparents(newctx.node(), repo.nullid)
     s = newctx.status(oldctx, match=match)
     for f in s.modified:
         if ds[f] == b'r':
--- a/mercurial/setdiscovery.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/setdiscovery.py	Mon May 03 18:55:19 2021 +0200
@@ -46,10 +46,7 @@
 import random
 
 from .i18n import _
-from .node import (
-    nullid,
-    nullrev,
-)
+from .node import nullrev
 from . import (
     error,
     policy,
@@ -391,9 +388,9 @@
             audit[b'total-roundtrips'] = 1
 
         if cl.tiprev() == nullrev:
-            if srvheadhashes != [nullid]:
-                return [nullid], True, srvheadhashes
-            return [nullid], False, []
+            if srvheadhashes != [cl.nullid]:
+                return [cl.nullid], True, srvheadhashes
+            return [cl.nullid], False, []
     else:
         # we still need the remote head for the function return
         with remote.commandexecutor() as e:
@@ -406,7 +403,7 @@
 
     knownsrvheads = []  # revnos of remote heads that are known locally
     for node in srvheadhashes:
-        if node == nullid:
+        if node == cl.nullid:
             continue
 
         try:
@@ -503,17 +500,17 @@
     if audit is not None:
         audit[b'total-roundtrips'] = roundtrips
 
-    if not result and srvheadhashes != [nullid]:
+    if not result and srvheadhashes != [cl.nullid]:
         if abortwhenunrelated:
             raise error.Abort(_(b"repository is unrelated"))
         else:
             ui.warn(_(b"warning: repository is unrelated\n"))
         return (
-            {nullid},
+            {cl.nullid},
             True,
             srvheadhashes,
         )
 
-    anyincoming = srvheadhashes != [nullid]
+    anyincoming = srvheadhashes != [cl.nullid]
     result = {clnode(r) for r in result}
     return result, anyincoming, srvheadhashes
--- a/mercurial/shelve.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/shelve.py	Mon May 03 18:55:19 2021 +0200
@@ -31,7 +31,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from . import (
@@ -822,7 +821,7 @@
         pendingctx = state.pendingctx
 
         with repo.dirstate.parentchange():
-            repo.setparents(state.pendingctx.node(), nullid)
+            repo.setparents(state.pendingctx.node(), repo.nullid)
             repo.dirstate.write(repo.currenttransaction())
 
         targetphase = phases.internal
@@ -831,7 +830,7 @@
         overrides = {(b'phases', b'new-commit'): targetphase}
         with repo.ui.configoverride(overrides, b'unshelve'):
             with repo.dirstate.parentchange():
-                repo.setparents(state.parents[0], nullid)
+                repo.setparents(state.parents[0], repo.nullid)
                 newnode, ispartialunshelve = _createunshelvectx(
                     ui, repo, shelvectx, basename, interactive, opts
                 )
@@ -1027,7 +1026,7 @@
             raise error.ConflictResolutionRequired(b'unshelve')
 
         with repo.dirstate.parentchange():
-            repo.setparents(tmpwctx.node(), nullid)
+            repo.setparents(tmpwctx.node(), repo.nullid)
             newnode, ispartialunshelve = _createunshelvectx(
                 ui, repo, shelvectx, basename, interactive, opts
             )
--- a/mercurial/sparse.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/sparse.py	Mon May 03 18:55:19 2021 +0200
@@ -10,10 +10,7 @@
 import os
 
 from .i18n import _
-from .node import (
-    hex,
-    nullid,
-)
+from .node import hex
 from . import (
     error,
     match as matchmod,
@@ -177,7 +174,7 @@
     revs = [
         repo.changelog.rev(node)
         for node in repo.dirstate.parents()
-        if node != nullid
+        if node != repo.nullid
     ]
 
     allincludes = set()
@@ -321,7 +318,7 @@
         revs = [
             repo.changelog.rev(node)
             for node in repo.dirstate.parents()
-            if node != nullid
+            if node != repo.nullid
         ]
 
     signature = configsignature(repo, includetemp=includetemp)
--- a/mercurial/strip.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/strip.py	Mon May 03 18:55:19 2021 +0200
@@ -2,7 +2,6 @@
 
 from .i18n import _
 from .pycompat import getattr
-from .node import nullid
 from . import (
     bookmarks as bookmarksmod,
     cmdutil,
@@ -39,7 +38,7 @@
 
     if (
         util.safehasattr(repo, b'mq')
-        and p2 != nullid
+        and p2 != repo.nullid
         and p2 in [x.node for x in repo.mq.applied]
     ):
         unode = p2
@@ -218,7 +217,7 @@
         # if one of the wdir parent is stripped we'll need
         # to update away to an earlier revision
         update = any(
-            p != nullid and cl.rev(p) in strippedrevs
+            p != repo.nullid and cl.rev(p) in strippedrevs
             for p in repo.dirstate.parents()
         )
 
--- a/mercurial/subrepo.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/subrepo.py	Mon May 03 18:55:19 2021 +0200
@@ -21,7 +21,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     short,
 )
 from . import (
@@ -686,7 +685,7 @@
         # we can't fully delete the repository as it may contain
         # local-only history
         self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
-        hg.clean(self._repo, nullid, False)
+        hg.clean(self._repo, self._repo.nullid, False)
 
     def _get(self, state):
         source, revision, kind = state
--- a/mercurial/tagmerge.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/tagmerge.py	Mon May 03 18:55:19 2021 +0200
@@ -74,9 +74,6 @@
 from __future__ import absolute_import
 
 from .i18n import _
-from .node import (
-    nullhex,
-)
 from . import (
     tags as tagsmod,
     util,
@@ -243,8 +240,8 @@
         pnlosttagset = basetagset - pntagset
         for t in pnlosttagset:
             pntags[t] = basetags[t]
-            if pntags[t][-1][0] != nullhex:
-                pntags[t].append([nullhex, None])
+            if pntags[t][-1][0] != repo.nodeconstants.nullhex:
+                pntags[t].append([repo.nodeconstants.nullhex, None])
 
     conflictedtags = []  # for reporting purposes
     mergedtags = util.sortdict(p1tags)
--- a/mercurial/tags.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/tags.py	Mon May 03 18:55:19 2021 +0200
@@ -18,7 +18,6 @@
 from .node import (
     bin,
     hex,
-    nullid,
     nullrev,
     short,
 )
@@ -96,12 +95,12 @@
     return fnodes
 
 
-def _nulltonone(value):
+def _nulltonone(repo, value):
     """convert nullid to None
 
     For tag value, nullid means "deleted". This small utility function helps
     translating that to None."""
-    if value == nullid:
+    if value == repo.nullid:
         return None
     return value
 
@@ -123,14 +122,14 @@
     # list of (tag, old, new): None means missing
     entries = []
     for tag, (new, __) in newtags.items():
-        new = _nulltonone(new)
+        new = _nulltonone(repo, new)
         old, __ = oldtags.pop(tag, (None, None))
-        old = _nulltonone(old)
+        old = _nulltonone(repo, old)
         if old != new:
             entries.append((tag, old, new))
     # handle deleted tags
     for tag, (old, __) in oldtags.items():
-        old = _nulltonone(old)
+        old = _nulltonone(repo, old)
         if old is not None:
             entries.append((tag, old, None))
     entries.sort()
@@ -452,7 +451,7 @@
     repoheads = repo.heads()
     # Case 2 (uncommon): empty repo; get out quickly and don't bother
     # writing an empty cache.
-    if repoheads == [nullid]:
+    if repoheads == [repo.nullid]:
         return ([], {}, valid, {}, False)
 
     # Case 3 (uncommon): cache file missing or empty.
@@ -499,7 +498,7 @@
     for node in nodes:
         fnode = fnodescache.getfnode(node)
         flog = repo.file(b'.hgtags')
-        if fnode != nullid:
+        if fnode != repo.nullid:
             if fnode not in validated_fnodes:
                 if flog.hasnode(fnode):
                     validated_fnodes.add(fnode)
@@ -510,7 +509,7 @@
     if unknown_entries:
         fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
         for node, fnode in pycompat.iteritems(fixed_nodemap):
-            if fnode != nullid:
+            if fnode != repo.nullid:
                 cachefnode[node] = fnode
 
     fnodescache.write()
@@ -632,7 +631,7 @@
                 m = name
 
             if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
-                old = repo.tags().get(name, nullid)
+                old = repo.tags().get(name, repo.nullid)
                 fp.write(b'%s %s\n' % (hex(old), m))
             fp.write(b'%s %s\n' % (hex(node), m))
         fp.close()
@@ -762,8 +761,8 @@
         If an .hgtags does not exist at the specified revision, nullid is
         returned.
         """
-        if node == nullid:
-            return nullid
+        if node == self._repo.nullid:
+            return node
 
         ctx = self._repo[node]
         rev = ctx.rev()
@@ -826,7 +825,7 @@
                 fnode = ctx.filenode(b'.hgtags')
             except error.LookupError:
                 # No .hgtags file on this revision.
-                fnode = nullid
+                fnode = self._repo.nullid
         return fnode
 
     def setfnode(self, node, fnode):
--- a/mercurial/templatefuncs.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/templatefuncs.py	Mon May 03 18:55:19 2021 +0200
@@ -10,10 +10,7 @@
 import re
 
 from .i18n import _
-from .node import (
-    bin,
-    wdirid,
-)
+from .node import bin
 from . import (
     color,
     dagop,
@@ -767,9 +764,10 @@
         )
 
     repo = context.resource(mapping, b'repo')
-    if len(hexnode) > 40:
+    hexnodelen = 2 * repo.nodeconstants.nodelen
+    if len(hexnode) > hexnodelen:
         return hexnode
-    elif len(hexnode) == 40:
+    elif len(hexnode) == hexnodelen:
         try:
             node = bin(hexnode)
         except TypeError:
@@ -778,7 +776,7 @@
         try:
             node = scmutil.resolvehexnodeidprefix(repo, hexnode)
         except error.WdirUnsupported:
-            node = wdirid
+            node = repo.nodeconstants.wdirid
         except error.LookupError:
             return hexnode
         if not node:
--- a/mercurial/templatekw.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/templatekw.py	Mon May 03 18:55:19 2021 +0200
@@ -10,8 +10,6 @@
 from .i18n import _
 from .node import (
     hex,
-    nullid,
-    wdirid,
     wdirrev,
 )
 
@@ -29,7 +27,10 @@
     templateutil,
     util,
 )
-from .utils import stringutil
+from .utils import (
+    stringutil,
+    urlutil,
+)
 
 _hybrid = templateutil.hybrid
 hybriddict = templateutil.hybriddict
@@ -412,7 +413,7 @@
 
 def getgraphnodecurrent(repo, ctx, cache):
     wpnodes = repo.dirstate.parents()
-    if wpnodes[1] == nullid:
+    if wpnodes[1] == repo.nullid:
         wpnodes = wpnodes[:1]
     if ctx.node() in wpnodes:
         return b'@'
@@ -525,11 +526,12 @@
     ctx = context.resource(mapping, b'ctx')
     mnode = ctx.manifestnode()
     if mnode is None:
-        mnode = wdirid
+        mnode = repo.nodeconstants.wdirid
         mrev = wdirrev
+        mhex = repo.nodeconstants.wdirhex
     else:
         mrev = repo.manifestlog.rev(mnode)
-    mhex = hex(mnode)
+        mhex = hex(mnode)
     mapping = context.overlaymap(mapping, {b'rev': mrev, b'node': mhex})
     f = context.process(b'manifest', mapping)
     return templateutil.hybriditem(
@@ -661,9 +663,8 @@
     repo = context.resource(mapping, b'repo')
     # see commands.paths() for naming of dictionary keys
     paths = repo.ui.paths
-    urls = util.sortdict(
-        (k, p.rawloc) for k, p in sorted(pycompat.iteritems(paths))
-    )
+    all_paths = urlutil.list_paths(repo.ui)
+    urls = util.sortdict((k, p.rawloc) for k, p in all_paths)
 
     def makemap(k):
         p = paths[k]
@@ -671,7 +672,10 @@
         d.update((o, v) for o, v in sorted(pycompat.iteritems(p.suboptions)))
         return d
 
-    return _hybrid(None, urls, makemap, lambda k: b'%s=%s' % (k, urls[k]))
+    def format_one(k):
+        return b'%s=%s' % (k, urls[k])
+
+    return _hybrid(None, urls, makemap, format_one)
 
 
 @templatekeyword(b"predecessors", requires={b'repo', b'ctx'})
--- a/mercurial/testing/storage.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/testing/storage.py	Mon May 03 18:55:19 2021 +0200
@@ -11,7 +11,6 @@
 
 from ..node import (
     hex,
-    nullid,
     nullrev,
 )
 from ..pycompat import getattr
@@ -51,7 +50,7 @@
         self.assertFalse(f.hasnode(None))
         self.assertFalse(f.hasnode(0))
         self.assertFalse(f.hasnode(nullrev))
-        self.assertFalse(f.hasnode(nullid))
+        self.assertFalse(f.hasnode(f.nullid))
         self.assertFalse(f.hasnode(b'0'))
         self.assertFalse(f.hasnode(b'a' * 20))
 
@@ -64,8 +63,8 @@
 
         self.assertEqual(list(f.revs(start=20)), [])
 
-        # parents() and parentrevs() work with nullid/nullrev.
-        self.assertEqual(f.parents(nullid), (nullid, nullid))
+        # parents() and parentrevs() work with f.nullid/nullrev.
+        self.assertEqual(f.parents(f.nullid), (f.nullid, f.nullid))
         self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
 
         with self.assertRaises(error.LookupError):
@@ -78,9 +77,9 @@
             with self.assertRaises(IndexError):
                 f.parentrevs(i)
 
-        # nullid/nullrev lookup always works.
-        self.assertEqual(f.rev(nullid), nullrev)
-        self.assertEqual(f.node(nullrev), nullid)
+        # f.nullid/nullrev lookup always works.
+        self.assertEqual(f.rev(f.nullid), nullrev)
+        self.assertEqual(f.node(nullrev), f.nullid)
 
         with self.assertRaises(error.LookupError):
             f.rev(b'\x01' * 20)
@@ -92,16 +91,16 @@
             with self.assertRaises(IndexError):
                 f.node(i)
 
-        self.assertEqual(f.lookup(nullid), nullid)
-        self.assertEqual(f.lookup(nullrev), nullid)
-        self.assertEqual(f.lookup(hex(nullid)), nullid)
-        self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
+        self.assertEqual(f.lookup(f.nullid), f.nullid)
+        self.assertEqual(f.lookup(nullrev), f.nullid)
+        self.assertEqual(f.lookup(hex(f.nullid)), f.nullid)
+        self.assertEqual(f.lookup(b'%d' % nullrev), f.nullid)
 
         with self.assertRaises(error.LookupError):
             f.lookup(b'badvalue')
 
         with self.assertRaises(error.LookupError):
-            f.lookup(hex(nullid)[0:12])
+            f.lookup(hex(f.nullid)[0:12])
 
         with self.assertRaises(error.LookupError):
             f.lookup(b'-2')
@@ -140,19 +139,19 @@
             with self.assertRaises(IndexError):
                 f.iscensored(i)
 
-        self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
+        self.assertEqual(list(f.commonancestorsheads(f.nullid, f.nullid)), [])
 
         with self.assertRaises(ValueError):
             self.assertEqual(list(f.descendants([])), [])
 
         self.assertEqual(list(f.descendants([nullrev])), [])
 
-        self.assertEqual(f.heads(), [nullid])
-        self.assertEqual(f.heads(nullid), [nullid])
-        self.assertEqual(f.heads(None, [nullid]), [nullid])
-        self.assertEqual(f.heads(nullid, [nullid]), [nullid])
+        self.assertEqual(f.heads(), [f.nullid])
+        self.assertEqual(f.heads(f.nullid), [f.nullid])
+        self.assertEqual(f.heads(None, [f.nullid]), [f.nullid])
+        self.assertEqual(f.heads(f.nullid, [f.nullid]), [f.nullid])
 
-        self.assertEqual(f.children(nullid), [])
+        self.assertEqual(f.children(f.nullid), [])
 
         with self.assertRaises(error.LookupError):
             f.children(b'\x01' * 20)
@@ -160,7 +159,7 @@
     def testsinglerevision(self):
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node = f.add(b'initial', None, tr, 0, nullid, nullid)
+            node = f.add(b'initial', None, tr, 0, f.nullid, f.nullid)
 
         self.assertEqual(len(f), 1)
         self.assertEqual(list(f), [0])
@@ -174,7 +173,7 @@
         self.assertTrue(f.hasnode(node))
         self.assertFalse(f.hasnode(hex(node)))
         self.assertFalse(f.hasnode(nullrev))
-        self.assertFalse(f.hasnode(nullid))
+        self.assertFalse(f.hasnode(f.nullid))
         self.assertFalse(f.hasnode(node[0:12]))
         self.assertFalse(f.hasnode(hex(node)[0:20]))
 
@@ -188,7 +187,7 @@
         self.assertEqual(list(f.revs(1, 0)), [1, 0])
         self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
 
-        self.assertEqual(f.parents(node), (nullid, nullid))
+        self.assertEqual(f.parents(node), (f.nullid, f.nullid))
         self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
 
         with self.assertRaises(error.LookupError):
@@ -209,7 +208,7 @@
 
         self.assertEqual(f.lookup(node), node)
         self.assertEqual(f.lookup(0), node)
-        self.assertEqual(f.lookup(-1), nullid)
+        self.assertEqual(f.lookup(-1), f.nullid)
         self.assertEqual(f.lookup(b'0'), node)
         self.assertEqual(f.lookup(hex(node)), node)
 
@@ -256,9 +255,9 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
-            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+            node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
 
         self.assertEqual(len(f), 3)
         self.assertEqual(list(f), [0, 1, 2])
@@ -284,9 +283,9 @@
         # TODO this is wrong
         self.assertEqual(list(f.revs(3, 2)), [3, 2])
 
-        self.assertEqual(f.parents(node0), (nullid, nullid))
-        self.assertEqual(f.parents(node1), (node0, nullid))
-        self.assertEqual(f.parents(node2), (node1, nullid))
+        self.assertEqual(f.parents(node0), (f.nullid, f.nullid))
+        self.assertEqual(f.parents(node1), (node0, f.nullid))
+        self.assertEqual(f.parents(node2), (node1, f.nullid))
 
         self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
         self.assertEqual(f.parentrevs(1), (0, nullrev))
@@ -330,7 +329,7 @@
         with self.assertRaises(IndexError):
             f.iscensored(3)
 
-        self.assertEqual(f.commonancestorsheads(node1, nullid), [])
+        self.assertEqual(f.commonancestorsheads(node1, f.nullid), [])
         self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
         self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
         self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
@@ -364,12 +363,12 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'1', None, tr, 1, node0, nullid)
-            node2 = f.add(b'2', None, tr, 2, node1, nullid)
-            node3 = f.add(b'3', None, tr, 3, node0, nullid)
-            node4 = f.add(b'4', None, tr, 4, node3, nullid)
-            node5 = f.add(b'5', None, tr, 5, node0, nullid)
+            node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+            node2 = f.add(b'2', None, tr, 2, node1, f.nullid)
+            node3 = f.add(b'3', None, tr, 3, node0, f.nullid)
+            node4 = f.add(b'4', None, tr, 4, node3, f.nullid)
+            node5 = f.add(b'5', None, tr, 5, node0, f.nullid)
 
         self.assertEqual(len(f), 6)
 
@@ -427,24 +426,24 @@
             with self.assertRaises(IndexError):
                 f.size(i)
 
-        self.assertEqual(f.revision(nullid), b'')
-        self.assertEqual(f.rawdata(nullid), b'')
+        self.assertEqual(f.revision(f.nullid), b'')
+        self.assertEqual(f.rawdata(f.nullid), b'')
 
         with self.assertRaises(error.LookupError):
             f.revision(b'\x01' * 20)
 
-        self.assertEqual(f.read(nullid), b'')
+        self.assertEqual(f.read(f.nullid), b'')
 
         with self.assertRaises(error.LookupError):
             f.read(b'\x01' * 20)
 
-        self.assertFalse(f.renamed(nullid))
+        self.assertFalse(f.renamed(f.nullid))
 
         with self.assertRaises(error.LookupError):
             f.read(b'\x01' * 20)
 
-        self.assertTrue(f.cmp(nullid, b''))
-        self.assertTrue(f.cmp(nullid, b'foo'))
+        self.assertTrue(f.cmp(f.nullid, b''))
+        self.assertTrue(f.cmp(f.nullid, b'foo'))
 
         with self.assertRaises(error.LookupError):
             f.cmp(b'\x01' * 20, b'irrelevant')
@@ -455,7 +454,7 @@
             next(gen)
 
         # Emitting null node yields nothing.
-        gen = f.emitrevisions([nullid])
+        gen = f.emitrevisions([f.nullid])
         with self.assertRaises(StopIteration):
             next(gen)
 
@@ -468,7 +467,7 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node = f.add(fulltext, None, tr, 0, nullid, nullid)
+            node = f.add(fulltext, None, tr, 0, f.nullid, f.nullid)
 
         self.assertEqual(f.storageinfo(), {})
         self.assertEqual(
@@ -496,10 +495,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
         self.assertIsNone(rev.delta)
@@ -512,10 +511,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext)
         self.assertIsNone(rev.delta)
@@ -534,9 +533,9 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
-            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+            node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
 
         self.assertEqual(f.storageinfo(), {})
         self.assertEqual(
@@ -596,10 +595,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext0)
         self.assertIsNone(rev.delta)
@@ -608,7 +607,7 @@
 
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
@@ -622,7 +621,7 @@
 
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
@@ -641,10 +640,10 @@
         rev = next(gen)
 
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext0)
         self.assertIsNone(rev.delta)
@@ -653,7 +652,7 @@
 
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
@@ -667,7 +666,7 @@
 
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertIsNone(rev.linknode)
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
@@ -700,16 +699,16 @@
         rev = next(gen)
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext2)
         self.assertIsNone(rev.delta)
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         # Delta behavior is storage dependent, so we can't easily test it.
 
         with self.assertRaises(StopIteration):
@@ -722,8 +721,8 @@
         rev = next(gen)
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertEqual(rev.revision, fulltext1)
         self.assertIsNone(rev.delta)
@@ -731,7 +730,7 @@
         rev = next(gen)
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertEqual(rev.basenode, node1)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
@@ -751,7 +750,7 @@
         rev = next(gen)
         self.assertEqual(rev.node, node1)
         self.assertEqual(rev.p1node, node0)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertEqual(rev.basenode, node0)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
@@ -768,9 +767,9 @@
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
         self.assertEqual(
@@ -789,9 +788,9 @@
 
         rev = next(gen)
         self.assertEqual(rev.node, node0)
-        self.assertEqual(rev.p1node, nullid)
-        self.assertEqual(rev.p2node, nullid)
-        self.assertEqual(rev.basenode, nullid)
+        self.assertEqual(rev.p1node, f.nullid)
+        self.assertEqual(rev.p2node, f.nullid)
+        self.assertEqual(rev.basenode, f.nullid)
         self.assertIsNone(rev.baserevisionsize)
         self.assertIsNone(rev.revision)
         self.assertEqual(
@@ -802,7 +801,7 @@
         rev = next(gen)
         self.assertEqual(rev.node, node2)
         self.assertEqual(rev.p1node, node1)
-        self.assertEqual(rev.p2node, nullid)
+        self.assertEqual(rev.p2node, f.nullid)
         self.assertEqual(rev.basenode, node0)
 
         with self.assertRaises(StopIteration):
@@ -841,11 +840,11 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
-            node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, meta1, tr, 1, node0, f.nullid)
+            node2 = f.add(fulltext2, meta2, tr, 2, f.nullid, f.nullid)
 
-        # Metadata header isn't recognized when parent isn't nullid.
+        # Metadata header isn't recognized when parent isn't f.nullid.
         self.assertEqual(f.size(1), len(stored1))
         self.assertEqual(f.size(2), len(fulltext2))
 
@@ -886,8 +885,8 @@
 
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
-            node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
+            node0 = f.add(fulltext0, {}, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(fulltext1, meta1, tr, 1, f.nullid, f.nullid)
 
         # TODO this is buggy.
         self.assertEqual(f.size(0), len(fulltext0) + 4)
@@ -916,15 +915,15 @@
         fulltext1 = fulltext0 + b'bar\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         self.assertEqual(len(f), 2)
-        self.assertEqual(f.parents(node1), (node0, nullid))
+        self.assertEqual(f.parents(node1), (node0, f.nullid))
 
         # revision() raises since it performs hash verification.
         with self.assertRaises(error.StorageError):
@@ -951,11 +950,11 @@
         fulltext1 = fulltext0 + b'bar\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         with self.assertRaises(error.StorageError):
@@ -973,11 +972,11 @@
         fulltext1 = fulltext0 + b'bar\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         with self.assertRaises(error.StorageError):
@@ -994,22 +993,22 @@
         fulltext2 = fulltext1 + b'baz\n'
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
             node1 = b'\xaa' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+                f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
             )
 
         with self.assertRaises(error.StorageError):
             f.read(node1)
 
-        node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
+        node2 = storageutil.hashrevisionsha1(fulltext2, node1, f.nullid)
 
         with self._maketransactionfn() as tr:
             delta = mdiff.textdiff(fulltext1, fulltext2)
             self._addrawrevisionfn(
-                f, tr, node2, node1, nullid, 2, delta=(1, delta)
+                f, tr, node2, node1, f.nullid, 2, delta=(1, delta)
             )
 
         self.assertEqual(len(f), 3)
@@ -1029,13 +1028,13 @@
         )
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
 
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, stored1, censored=True
+                f, tr, node1, node0, f.nullid, 1, stored1, censored=True
             )
 
         self.assertTrue(f.iscensored(1))
@@ -1063,13 +1062,13 @@
         )
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
 
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, stored1, censored=True
+                f, tr, node1, node0, f.nullid, 1, stored1, censored=True
             )
 
         with self.assertRaises(error.CensoredNodeError):
@@ -1088,10 +1087,10 @@
     def testaddnoop(self):
         f = self._makefilefn()
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
             # Varying by linkrev shouldn't impact hash.
-            node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
+            node2 = f.add(b'foo', None, tr, 1, f.nullid, f.nullid)
 
         self.assertEqual(node1, node0)
         self.assertEqual(node2, node0)
@@ -1102,7 +1101,9 @@
         with self._maketransactionfn() as tr:
             # Adding a revision with bad node value fails.
             with self.assertRaises(error.StorageError):
-                f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
+                f.addrevision(
+                    b'foo', tr, 0, f.nullid, f.nullid, node=b'\x01' * 20
+                )
 
     def testaddrevisionunknownflag(self):
         f = self._makefilefn()
@@ -1113,7 +1114,7 @@
                     break
 
             with self.assertRaises(error.StorageError):
-                f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
+                f.addrevision(b'foo', tr, 0, f.nullid, f.nullid, flags=flags)
 
     def testaddgroupsimple(self):
         f = self._makefilefn()
@@ -1153,12 +1154,12 @@
         delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+            node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
 
         f = self._makefilefn()
 
         deltas = [
-            (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
+            (node0, f.nullid, f.nullid, f.nullid, f.nullid, delta0, 0, {}),
         ]
 
         with self._maketransactionfn() as tr:
@@ -1207,7 +1208,7 @@
         nodes = []
         with self._maketransactionfn() as tr:
             for fulltext in fulltexts:
-                nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
+                nodes.append(f.add(fulltext, None, tr, 0, f.nullid, f.nullid))
 
         f = self._makefilefn()
         deltas = []
@@ -1215,7 +1216,7 @@
             delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
 
             deltas.append(
-                (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
+                (nodes[i], f.nullid, f.nullid, f.nullid, f.nullid, delta, 0, {})
             )
 
         with self._maketransactionfn() as tr:
@@ -1254,18 +1255,18 @@
         )
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
+            node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
 
             # The node value doesn't matter since we can't verify it.
             node1 = b'\xbb' * 20
 
             self._addrawrevisionfn(
-                f, tr, node1, node0, nullid, 1, stored1, censored=True
+                f, tr, node1, node0, f.nullid, 1, stored1, censored=True
             )
 
         delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
         deltas = [
-            (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
+            (b'\xcc' * 20, node1, f.nullid, b'\x01' * 20, node1, delta, 0, {})
         ]
 
         with self._maketransactionfn() as tr:
@@ -1276,9 +1277,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
-            node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
-            node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
+            node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, f.nullid)
+            node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, f.nullid)
 
         with self._maketransactionfn() as tr:
             f.censorrevision(tr, node1)
@@ -1298,7 +1299,7 @@
 
         with self._maketransactionfn() as tr:
             for rev in range(10):
-                f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
+                f.add(b'%d' % rev, None, tr, rev, f.nullid, f.nullid)
 
         for rev in range(10):
             self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1308,10 +1309,10 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            p1 = nullid
+            p1 = f.nullid
 
             for rev in range(10):
-                f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+                f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
 
         for rev in range(10):
             self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1320,11 +1321,11 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'1', None, tr, 1, node0, nullid)
-            f.add(b'2', None, tr, 2, node1, nullid)
-            f.add(b'3', None, tr, 3, node0, nullid)
-            f.add(b'4', None, tr, 4, node0, nullid)
+            node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+            f.add(b'2', None, tr, 2, node1, f.nullid)
+            f.add(b'3', None, tr, 3, node0, f.nullid)
+            f.add(b'4', None, tr, 4, node0, f.nullid)
 
         for rev in range(5):
             self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1333,9 +1334,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
-            f.add(b'1', None, tr, 10, node0, nullid)
-            f.add(b'2', None, tr, 5, node0, nullid)
+            node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            f.add(b'1', None, tr, 10, node0, f.nullid)
+            f.add(b'2', None, tr, 5, node0, f.nullid)
 
         self.assertEqual(f.getstrippoint(0), (0, set()))
         self.assertEqual(f.getstrippoint(1), (1, set()))
@@ -1362,9 +1363,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            p1 = nullid
+            p1 = f.nullid
             for rev in range(10):
-                p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+                p1 = f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
 
         self.assertEqual(len(f), 10)
 
@@ -1377,9 +1378,9 @@
         f = self._makefilefn()
 
         with self._maketransactionfn() as tr:
-            f.add(b'0', None, tr, 0, nullid, nullid)
-            node1 = f.add(b'1', None, tr, 5, nullid, nullid)
-            node2 = f.add(b'2', None, tr, 10, nullid, nullid)
+            f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+            node1 = f.add(b'1', None, tr, 5, f.nullid, f.nullid)
+            node2 = f.add(b'2', None, tr, 10, f.nullid, f.nullid)
 
         self.assertEqual(len(f), 3)
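
The storage tests above now read the null node off the file storage object (`f.nullid`) rather than importing a module-level constant, so the same assertions hold for whatever hash width a backend uses. A small illustrative sketch of backend-agnostic code written against that contract; the helper name is ours, not part of the interface:

    def is_root_revision(f, node):
        # a revision is a root when both parents are the storage
        # object's own null node, whatever its hash width
        p1, p2 = f.parents(node)
        return p1 == f.nullid and p2 == f.nullid
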
 
--- a/mercurial/treediscovery.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/treediscovery.py	Mon May 03 18:55:19 2021 +0200
@@ -10,10 +10,7 @@
 import collections
 
 from .i18n import _
-from .node import (
-    nullid,
-    short,
-)
+from .node import short
 from . import (
     error,
     pycompat,
@@ -44,11 +41,11 @@
     if audit is not None:
         audit[b'total-roundtrips'] = 1
 
-    if repo.changelog.tip() == nullid:
-        base.add(nullid)
-        if heads != [nullid]:
-            return [nullid], [nullid], list(heads)
-        return [nullid], [], heads
+    if repo.changelog.tip() == repo.nullid:
+        base.add(repo.nullid)
+        if heads != [repo.nullid]:
+            return [repo.nullid], [repo.nullid], list(heads)
+        return [repo.nullid], [], heads
 
     # assume we're closer to the tip than the root
     # and start by examining the heads
@@ -84,7 +81,7 @@
                 continue
 
             repo.ui.debug(b"examining %s:%s\n" % (short(n[0]), short(n[1])))
-            if n[0] == nullid:  # found the end of the branch
+            if n[0] == repo.nullid:  # found the end of the branch
                 pass
             elif n in seenbranch:
                 repo.ui.debug(b"branch already found\n")
@@ -170,7 +167,7 @@
             raise error.RepoError(_(b"already have changeset ") + short(f[:4]))
 
     base = list(base)
-    if base == [nullid]:
+    if base == [repo.nullid]:
         if force:
             repo.ui.warn(_(b"warning: repository is unrelated\n"))
         else:
--- a/mercurial/ui.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/ui.py	Mon May 03 18:55:19 2021 +0200
@@ -1058,6 +1058,8 @@
 
         This method exist as `getpath` need a ui for potential warning message.
         """
+        msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
+        self.deprecwarn(msg, '6.0')
         return self.paths.getpath(self, *args, **kwargs)
 
     @property
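
The deprecation added above points callers at the `get_*` helpers in mercurial/utils/urlutil.py, several of which appear later in this patch (`list_paths`, `try_path`, `get_push_paths`, `get_pull_paths`). A minimal migration sketch, assuming a `ui`/`repo` pair and a list of byte-string destinations; `push_to` is a hypothetical caller-side function:

    from mercurial.utils import urlutil

    # enumerate configured paths instead of reading ui.paths directly
    for name, path in urlutil.list_paths(ui):
        ui.write(b'%s = %s\n' % (name, path.rawloc))

    # resolve push destinations the way the rewritten callers do
    for path in urlutil.get_push_paths(repo, ui, dests):
        push_to(path)  # yields urlutil path objects, not raw strings
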
--- a/mercurial/util.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/util.py	Mon May 03 18:55:19 2021 +0200
@@ -34,6 +34,7 @@
 import traceback
 import warnings
 
+from .node import hex
 from .thirdparty import attr
 from .pycompat import (
     delattr,
--- a/mercurial/utils/storageutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/utils/storageutil.py	Mon May 03 18:55:19 2021 +0200
@@ -13,8 +13,8 @@
 from ..i18n import _
 from ..node import (
     bin,
-    nullid,
     nullrev,
+    sha1nodeconstants,
 )
 from .. import (
     dagop,
@@ -26,7 +26,7 @@
 from ..revlogutils import sidedata as sidedatamod
 from ..utils import hashutil
 
-_nullhash = hashutil.sha1(nullid)
+_nullhash = hashutil.sha1(sha1nodeconstants.nullid)
 
 
 def hashrevisionsha1(text, p1, p2):
@@ -37,7 +37,7 @@
     content in the revision graph.
     """
     # As of now, if one of the parent node is null, p2 is null
-    if p2 == nullid:
+    if p2 == sha1nodeconstants.nullid:
         # deep copy of a hash is faster than creating one
         s = _nullhash.copy()
         s.update(p1)
@@ -107,7 +107,7 @@
     Returns ``False`` if the file has no copy metadata. Otherwise a
     2-tuple of the source filename and node.
     """
-    if store.parents(node)[0] != nullid:
+    if store.parents(node)[0] != sha1nodeconstants.nullid:
         return False
 
     meta = parsemeta(store.revision(node))[0]
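
The `_nullhash` fast path above is a shortcut through the usual node hashing scheme: hash the two parent nodes in sorted order, then the revision text; since the null node is all zero bytes it always sorts first. A stand-alone sketch of the equivalent computation, assuming 20-byte SHA-1 nodes (the function name is illustrative, not the module's API):

    import hashlib

    def hash_revision_sha1(text, p1, p2):
        # sort the parents so the hash is independent of their order;
        # the all-zero null node always ends up first, which is what
        # the precomputed _nullhash copy above relies on
        a, b = sorted((p1, p2))
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()
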
--- a/mercurial/utils/urlutil.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/utils/urlutil.py	Mon May 03 18:55:19 2021 +0200
@@ -445,6 +445,30 @@
     return bytes(u)
 
 
+def list_paths(ui, target_path=None):
+    """list all the (name, paths) in the passed ui"""
+    if target_path is None:
+        return sorted(pycompat.iteritems(ui.paths))
+    else:
+        path = ui.paths.get(target_path)
+        if path is None:
+            return []
+        else:
+            return [(target_path, path)]
+
+
+def try_path(ui, url):
+    """try to build a path from a url
+
+    Return None if no path could be built.
+    """
+    try:
+        # we pass the ui instance as warnings might need to be issued
+        return path(ui, None, rawloc=url)
+    except ValueError:
+        return None
+
+
 def get_push_paths(repo, ui, dests):
     """yields all the `path` selected as push destination by `dests`"""
     if not dests:
@@ -459,7 +483,15 @@
             )
     else:
         for dest in dests:
-            yield ui.getpath(dest)
+            if dest in ui.paths:
+                yield ui.paths[dest]
+            else:
+                path = try_path(ui, dest)
+                if path is None:
+                    msg = _(b'repository %s does not exist')
+                    msg %= dest
+                    raise error.RepoError(msg)
+                yield path
 
 
 def get_pull_paths(repo, ui, sources, default_branches=()):
@@ -471,10 +503,10 @@
             url = ui.paths[source].rawloc
         else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                url = path(ui, None, rawloc=source).rawloc
-            except ValueError:
+            path = try_path(ui, source)
+            if path is not None:
+                url = path.rawloc
+            else:
                 url = source
         yield parseurl(url, default_branches)
 
@@ -520,10 +552,10 @@
             url = ui.paths[source].rawloc
         else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                url = path(ui, None, rawloc=source).rawloc
-            except ValueError:
+            path = try_path(ui, source)
+            if path is not None:
+                url = path.rawloc
+            else:
                 url = source
     return parseurl(url, default_branches)
 
@@ -542,10 +574,10 @@
             url = ui.paths[source].rawloc
         else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                url = path(ui, None, rawloc=source).rawloc
-            except ValueError:
+            path = try_path(ui, source)
+            if path is not None:
+                url = path.rawloc
+            else:
                 url = source
     clone_path, branch = parseurl(url, default_branches)
     return url, clone_path, branch
@@ -590,6 +622,8 @@
         Returns None if ``name`` is not a registered path, a URI, or a local
         path to a repo.
         """
+        msg = b'getpath is deprecated, use `get_*` functions from urlutil'
+        self.deprecwarn(msg, '6.0')
         # Only fall back to default if no path was requested.
         if name is None:
             if not default:
@@ -607,16 +641,14 @@
         # This may need to raise in the future.
         if not name:
             return None
-
-        try:
+        if name in self:
             return self[name]
-        except KeyError:
+        else:
             # Try to resolve as a local path or URI.
-            try:
-                # we pass the ui instance are warning might need to be issued
-                return path(ui, None, rawloc=name)
-            except ValueError:
+            path = try_path(ui, name)
+            if path is None:
                 raise error.RepoError(_(b'repository %s does not exist') % name)
+            return path.rawloc
 
 
 _pathsuboptions = {}
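
`try_path` wraps the `path` constructor so callers can test for `None` instead of catching `ValueError`; the call sites rewritten above all follow the same shape. A hedged usage sketch, assuming a `ui` instance and a byte-string `source`; the helper name is ours:

    from mercurial.utils import urlutil

    def resolve_source(ui, source):
        # prefer a configured path name, then fall back to treating
        # the argument as a raw URL or local directory, mirroring the
        # rewritten pull/clone call sites above
        if source in ui.paths:
            return ui.paths[source].rawloc
        path = urlutil.try_path(ui, source)
        return path.rawloc if path is not None else source
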
--- a/mercurial/verify.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/verify.py	Mon May 03 18:55:19 2021 +0200
@@ -10,13 +10,8 @@
 import os
 
 from .i18n import _
-from .node import (
-    nullid,
-    short,
-)
-from .utils import (
-    stringutil,
-)
+from .node import short
+from .utils import stringutil
 
 from . import (
     error,
@@ -159,13 +154,13 @@
 
         try:
             p1, p2 = obj.parents(node)
-            if p1 not in seen and p1 != nullid:
+            if p1 not in seen and p1 != self.repo.nullid:
                 self._err(
                     lr,
                     _(b"unknown parent 1 %s of %s") % (short(p1), short(node)),
                     f,
                 )
-            if p2 not in seen and p2 != nullid:
+            if p2 not in seen and p2 != self.repo.nullid:
                 self._err(
                     lr,
                     _(b"unknown parent 2 %s of %s") % (short(p2), short(node)),
@@ -267,7 +262,7 @@
 
             try:
                 changes = cl.read(n)
-                if changes[0] != nullid:
+                if changes[0] != self.repo.nullid:
                     mflinkrevs.setdefault(changes[0], []).append(i)
                     self.refersmf = True
                 for f in changes[3]:
@@ -598,7 +593,7 @@
                                 % (rp[0], short(rp[1])),
                                 f,
                             )
-                        elif rp[1] == nullid:
+                        elif rp[1] == self.repo.nullid:
                             ui.note(
                                 _(
                                     b"warning: %s@%s: copy source"
--- a/mercurial/wireprotov1server.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/wireprotov1server.py	Mon May 03 18:55:19 2021 +0200
@@ -11,10 +11,7 @@
 import os
 
 from .i18n import _
-from .node import (
-    hex,
-    nullid,
-)
+from .node import hex
 from .pycompat import getattr
 
 from . import (
@@ -470,7 +467,7 @@
         clheads = set(repo.changelog.heads())
         heads = set(opts.get(b'heads', set()))
         common = set(opts.get(b'common', set()))
-        common.discard(nullid)
+        common.discard(repo.nullid)
         if (
             repo.ui.configbool(b'server', b'pullbundle')
             and b'partial-pull' in proto.getprotocaps()
--- a/mercurial/wireprotov2server.py	Sat May 01 00:28:39 2021 -0400
+++ b/mercurial/wireprotov2server.py	Mon May 03 18:55:19 2021 +0200
@@ -10,10 +10,7 @@
 import contextlib
 
 from .i18n import _
-from .node import (
-    hex,
-    nullid,
-)
+from .node import hex
 from . import (
     discovery,
     encoding,
@@ -950,7 +947,7 @@
             if spec[b'roots']:
                 common = [n for n in spec[b'roots'] if clhasnode(n)]
             else:
-                common = [nullid]
+                common = [repo.nullid]
 
             for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
                 if n not in seen:
--- a/rust/hg-cpython/src/revlog.rs	Sat May 01 00:28:39 2021 -0400
+++ b/rust/hg-cpython/src/revlog.rs	Mon May 03 18:55:19 2021 +0200
@@ -172,6 +172,16 @@
         self.call_cindex(py, "clearcaches", args, kw)
     }
 
+    /// return the raw binary string representing a revision
+    def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "entry_binary", args, kw)
+    }
+
+    /// return a binary packed version of the header
+    def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
+        self.call_cindex(py, "pack_header", args, kw)
+    }
+
     /// get an index entry
     def get(&self, *args, **kw) -> PyResult<PyObject> {
         self.call_cindex(py, "get", args, kw)
--- a/tests/drawdag.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/drawdag.py	Mon May 03 18:55:19 2021 +0200
@@ -86,7 +86,6 @@
 import itertools
 import re
 
-from mercurial.node import nullid
 from mercurial.i18n import _
 from mercurial import (
     context,
@@ -299,7 +298,7 @@
         self._added = added
         self._parents = parentctxs
         while len(self._parents) < 2:
-            self._parents.append(repo[nullid])
+            self._parents.append(repo[repo.nullid])
 
     def filectx(self, key):
         return simplefilectx(key, self._added[key])
@@ -388,7 +387,7 @@
         content = content.replace(br'\n', b'\n').replace(br'\1', b'\1')
         files[name][path] = content
 
-    committed = {None: nullid}  # {name: node}
+    committed = {None: repo.nullid}  # {name: node}
 
     # for leaf nodes, try to find existing nodes in repo
     for name, parents in edges.items():
--- a/tests/simplestorerepo.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/simplestorerepo.py	Mon May 03 18:55:19 2021 +0200
@@ -18,7 +18,6 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     nullrev,
 )
 from mercurial.thirdparty import attr
@@ -136,18 +135,18 @@
             self._indexbynode[entry[b'node']] = entry
             self._indexbyrev[i] = entry
 
-        self._indexbynode[nullid] = {
-            b'node': nullid,
-            b'p1': nullid,
-            b'p2': nullid,
+        self._indexbynode[self._repo.nullid] = {
+            b'node': self._repo.nullid,
+            b'p1': self._repo.nullid,
+            b'p2': self._repo.nullid,
             b'linkrev': nullrev,
             b'flags': 0,
         }
 
         self._indexbyrev[nullrev] = {
-            b'node': nullid,
-            b'p1': nullid,
-            b'p2': nullid,
+            b'node': self._repo.nullid,
+            b'p1': self._repo.nullid,
+            b'p2': self._repo.nullid,
             b'linkrev': nullrev,
             b'flags': 0,
         }
@@ -160,7 +159,7 @@
                 (0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
             )
 
-        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
+        self._index.append((0, 0, 0, -1, -1, -1, -1, self._repo.nullid))
 
     def __len__(self):
         return len(self._indexdata)
@@ -288,7 +287,7 @@
             node = nodeorrev
         validatenode(node)
 
-        if node == nullid:
+        if node == self._repo.nullid:
             return b''
 
         rev = self.rev(node)
@@ -325,7 +324,7 @@
     def renamed(self, node):
         validatenode(node)
 
-        if self.parents(node)[0] != nullid:
+        if self.parents(node)[0] != self._repo.nullid:
             return False
 
         fulltext = self.revision(node)
@@ -451,7 +450,7 @@
         sidedata_helpers=None,
     ):
         # TODO this will probably break on some ordering options.
-        nodes = [n for n in nodes if n != nullid]
+        nodes = [n for n in nodes if n != self._repo.nullid]
         if not nodes:
             return
         for delta in storageutil.emitrevisions(
@@ -559,7 +558,7 @@
                 continue
 
             # Need to resolve the fulltext from the delta base.
-            if deltabase == nullid:
+            if deltabase == self._repo.nullid:
                 text = mdiff.patch(b'', delta)
             else:
                 text = mdiff.patch(self.revision(deltabase), delta)
@@ -588,11 +587,11 @@
         # This is copied from revlog.py.
         if start is None and stop is None:
             if not len(self):
-                return [nullid]
+                return [self._repo.nullid]
             return [self.node(r) for r in self._headrevs()]
 
         if start is None:
-            start = nullid
+            start = self._repo.nullid
         if stop is None:
             stop = []
         stoprevs = {self.rev(n) for n in stop}
--- a/tests/test-amend.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-amend.t	Mon May 03 18:55:19 2021 +0200
@@ -197,6 +197,7 @@
   $ echo 2 >> B
   $ hg amend
   abort: cannot amend changeset with children
+  (see 'hg help evolution.instability')
   [10]
 
 #if obsstore-on
@@ -231,6 +232,17 @@
   $ hg debugobsolete -r .
   112478962961147124edd43549aedd1a335e44bf be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
   be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 16084da537dd8f84cfdb3055c633772269d62e1b 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'note': 'adding bar', 'operation': 'amend', 'user': 'test'}
+
+Cannot cause divergence by default
+
+  $ hg co --hidden 1
+  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg amend -m divergent
+  abort: cannot amend 112478962961, as that creates content-divergence with 16084da537dd
+  (add --verbose for details)
+  [10]
+  $ hg amend -m divergent --config experimental.evolution.allowdivergence=true
+  2 new content-divergent changesets
 #endif
 
 Cannot amend public changeset
--- a/tests/test-annotate.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-annotate.t	Mon May 03 18:55:19 2021 +0200
@@ -479,19 +479,19 @@
 
   $ cat > ../legacyrepo.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import commit, error, extensions, node
+  > from mercurial import commit, error, extensions
   > def _filecommit(orig, repo, fctx, manifest1, manifest2,
   >                 linkrev, tr, includecopymeta, ms):
   >     fname = fctx.path()
   >     text = fctx.data()
   >     flog = repo.file(fname)
-  >     fparent1 = manifest1.get(fname, node.nullid)
-  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     fparent1 = manifest1.get(fname, repo.nullid)
+  >     fparent2 = manifest2.get(fname, repo.nullid)
   >     meta = {}
   >     copy = fctx.copysource()
   >     if copy and copy != fname:
   >         raise error.Abort('copying is not supported')
-  >     if fparent2 != node.nullid:
+  >     if fparent2 != repo.nullid:
   >         return flog.add(text, meta, tr, linkrev,
   >                         fparent1, fparent2), 'modified'
   >     raise error.Abort('only merging is supported')
--- a/tests/test-blackbox.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-blackbox.t	Mon May 03 18:55:19 2021 +0200
@@ -221,7 +221,7 @@
   1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob)
   1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> exthook-update: echo hooked finished in * seconds (glob)
   1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> update exited 0 after * seconds (glob)
-  1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
+  1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --no-profile --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
   1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> blackbox -l 5
 
 log rotation
--- a/tests/test-branch-change.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-branch-change.t	Mon May 03 18:55:19 2021 +0200
@@ -58,6 +58,7 @@
 
   $ hg branch -r 1::3 foo
   abort: cannot change branch of changeset with children
+  (see 'hg help evolution.instability')
   [10]
 
 Change with dirty working directory
@@ -129,6 +130,7 @@
 
   $ hg branch -r 2 stable
   abort: cannot change branch of changeset with children
+  (see 'hg help evolution.instability')
   [10]
 
 Enabling the allowunstable config and trying to change branch on a branch head
@@ -148,7 +150,8 @@
   [255]
 
   $ hg branch -r 4 --hidden foobar
-  abort: cannot change branch of a obsolete changeset
+  abort: cannot change branch of 3938acfb5c0f, as that creates content-divergence with 7c1991464886
+  (add --verbose for details)
   [10]
 
 Make sure bookmark movement is correct
--- a/tests/test-chg.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-chg.t	Mon May 03 18:55:19 2021 +0200
@@ -458,6 +458,7 @@
   LC_CTYPE=
   $ (unset LC_ALL; unset LANG; LC_CTYPE=unsupported_value chg \
   >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+  *cannot change locale* (glob) (?)
   LC_CTYPE=unsupported_value
   $ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
   >    --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
@@ -467,3 +468,72 @@
   LC_ALL=
   LC_CTYPE=
   LANG=
+
+Profiling isn't permanently enabled or carried over between chg invocations that
+share the same server
+  $ cp $HGRCPATH.orig $HGRCPATH
+  $ hg init $TESTTMP/profiling
+  $ cd $TESTTMP/profiling
+  $ filteredchg() {
+  >   CHGDEBUG=1 chg "$@" 2>&1 | egrep 'Sample count|start cmdserver' || true
+  > }
+  $ newchg() {
+  >   chg --kill-chg-daemon
+  >   filteredchg "$@" | egrep -v 'start cmdserver' || true
+  > }
+(--profile isn't permanently on just because it was specified when chg was
+started)
+  $ newchg log -r . --profile
+  Sample count: * (glob)
+  $ filteredchg log -r .
+(enabling profiling via config works, even on the first chg command that starts
+a cmdserver)
+  $ cat >> $HGRCPATH <<EOF
+  > [profiling]
+  > type=stat
+  > enabled=1
+  > EOF
+  $ newchg log -r .
+  Sample count: * (glob)
+  $ filteredchg log -r .
+  Sample count: * (glob)
+(test that we aren't accumulating more and more samples each run)
+  $ cat > $TESTTMP/debugsleep.py <<EOF
+  > import time
+  > from mercurial import registrar
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > @command(b'debugsleep', [], b'', norepo=True)
+  > def debugsleep(ui):
+  >   start = time.time()
+  >   x = 0
+  >   while time.time() < start + 0.5:
+  >     time.sleep(.1)
+  >     x += 1
+  >   ui.status(b'%d debugsleep iterations in %.03fs\n' % (x, time.time() - start))
+  > EOF
+  $ cat >> $HGRCPATH <<EOF
+  > [extensions]
+  > debugsleep = $TESTTMP/debugsleep.py
+  > EOF
+  $ newchg debugsleep > run_1
+  $ filteredchg debugsleep > run_2
+  $ filteredchg debugsleep > run_3
+  $ filteredchg debugsleep > run_4
+FIXME: Run 4 should not be >3x Run 1's number of samples.
+  $ "$PYTHON" <<EOF
+  > r1 = int(open("run_1", "r").read().split()[-1])
+  > r4 = int(open("run_4", "r").read().split()[-1])
+  > print("Run 1: %d samples\nRun 4: %d samples\nRun 4 > 3 * Run 1: %s" %
+  >       (r1, r4, r4 > (r1 * 3)))
+  > EOF
+  Run 1: * samples (glob)
+  Run 4: * samples (glob)
+  Run 4 > 3 * Run 1: False
+(Disabling with --no-profile on the commandline still works, but isn't permanent)
+  $ newchg log -r . --no-profile
+  $ filteredchg log -r .
+  Sample count: * (glob)
+  $ filteredchg log -r . --no-profile
+  $ filteredchg log -r .
+  Sample count: * (glob)
--- a/tests/test-commit-amend.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-commit-amend.t	Mon May 03 18:55:19 2021 +0200
@@ -406,7 +406,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   (branch merge, don't forget to commit)
   $ hg ci --amend
-  abort: cannot amend while merging
+  abort: cannot amend changesets while merging
   [20]
   $ hg ci -m 'merge'
 
--- a/tests/test-commit.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-commit.t	Mon May 03 18:55:19 2021 +0200
@@ -646,14 +646,14 @@
 verify pathauditor blocks evil filepaths
   $ cat > evil-commit.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import context, hg, node, ui as uimod
+  > from mercurial import context, hg, ui as uimod
   > notrc = u".h\u200cg".encode('utf-8') + b'/hgrc'
   > u = uimod.ui.load()
   > r = hg.repository(u, b'.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, memctx, path,
   >         b'[hooks]\nupdate = echo owned')
-  > c = context.memctx(r, [r.changelog.tip(), node.nullid],
+  > c = context.memctx(r, [r.changelog.tip(), r.nullid],
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
@@ -672,14 +672,14 @@
   repository tip rolled back to revision 2 (undo commit)
   $ cat > evil-commit.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import context, hg, node, ui as uimod
+  > from mercurial import context, hg, ui as uimod
   > notrc = b"HG~1/hgrc"
   > u = uimod.ui.load()
   > r = hg.repository(u, b'.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, memctx, path,
   >         b'[hooks]\nupdate = echo owned')
-  > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+  > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
@@ -692,14 +692,14 @@
   repository tip rolled back to revision 2 (undo commit)
   $ cat > evil-commit.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import context, hg, node, ui as uimod
+  > from mercurial import context, hg, ui as uimod
   > notrc = b"HG8B6C~2/hgrc"
   > u = uimod.ui.load()
   > r = hg.repository(u, b'.')
   > def filectxfn(repo, memctx, path):
   >     return context.memfilectx(repo, memctx, path,
   >         b'[hooks]\nupdate = echo owned')
-  > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+  > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
   >                    b'evil', [notrc], filectxfn, 0)
   > r.commitctx(c)
   > EOF
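(All three evil-commit.py heredocs above follow the same migration: node.nullid
is no longer imported and the null parent is taken from the repository itself
as r.nullid, so callers do not hard-code a hash width. A minimal sketch of the
updated memctx pattern, using only calls that appear in the heredocs; it
assumes it is run from inside a repository, and the file name, contents and
commit message are placeholders.)

    # Sketch only: an in-memory commit whose second parent is the
    # repository's null node rather than the removed node.nullid constant.
    from mercurial import context, hg, ui as uimod

    u = uimod.ui.load()
    r = hg.repository(u, b'.')

    def filectxfn(repo, memctx, path):
        # placeholder contents; the heredocs above write an hgrc payload here
        return context.memfilectx(repo, memctx, path, b'placeholder\n')

    c = context.memctx(r, [r[b'tip'].node(), r.nullid],
                       b'sketch commit', [b'some-file'], filectxfn, 0)
    # r.commitctx(c) would record the commit, as the heredocs above do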
--- a/tests/test-fastannotate-hg.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-fastannotate-hg.t	Mon May 03 18:55:19 2021 +0200
@@ -482,19 +482,19 @@
 
   $ cat > ../legacyrepo.py <<EOF
   > from __future__ import absolute_import
-  > from mercurial import commit, error, extensions, node
+  > from mercurial import commit, error, extensions
   > def _filecommit(orig, repo, fctx, manifest1, manifest2,
   >                 linkrev, tr, includecopymeta, ms):
   >     fname = fctx.path()
   >     text = fctx.data()
   >     flog = repo.file(fname)
-  >     fparent1 = manifest1.get(fname, node.nullid)
-  >     fparent2 = manifest2.get(fname, node.nullid)
+  >     fparent1 = manifest1.get(fname, repo.nullid)
+  >     fparent2 = manifest2.get(fname, repo.nullid)
   >     meta = {}
   >     copy = fctx.copysource()
   >     if copy and copy != fname:
   >         raise error.Abort('copying is not supported')
-  >     if fparent2 != node.nullid:
+  >     if fparent2 != repo.nullid:
   >         return flog.add(text, meta, tr, linkrev,
   >                         fparent1, fparent2), 'modified'
   >     raise error.Abort('only merging is supported')
--- a/tests/test-filelog.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-filelog.py	Mon May 03 18:55:19 2021 +0200
@@ -4,10 +4,7 @@
 """
 from __future__ import absolute_import, print_function
 
-from mercurial.node import (
-    hex,
-    nullid,
-)
+from mercurial.node import hex
 from mercurial import (
     hg,
     ui as uimod,
@@ -22,7 +19,7 @@
 def addrev(text, renamed=False):
     if renamed:
         # data doesn't matter. Just make sure filelog.renamed() returns True
-        meta = {b'copyrev': hex(nullid), b'copy': b'bar'}
+        meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
     else:
         meta = {}
 
@@ -30,7 +27,7 @@
     try:
         lock = repo.lock()
         t = repo.transaction(b'commit')
-        node = fl.add(text, meta, t, 0, nullid, nullid)
+        node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
         return node
     finally:
         if t:
--- a/tests/test-fix.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-fix.t	Mon May 03 18:55:19 2021 +0200
@@ -1173,6 +1173,7 @@
   $ hg commit -m "second"
   $ hg --config experimental.evolution.allowunstable=False fix -r '.^'
   abort: cannot fix changeset with children
+  (see 'hg help evolution.instability')
   [10]
   $ hg fix -r '.^'
   1 new orphan changesets
--- a/tests/test-globalopts.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-globalopts.t	Mon May 03 18:55:19 2021 +0200
@@ -419,6 +419,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -552,6 +553,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
--- a/tests/test-help-hide.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-help-hide.t	Mon May 03 18:55:19 2021 +0200
@@ -117,6 +117,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -254,6 +255,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
--- a/tests/test-help.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-help.t	Mon May 03 18:55:19 2021 +0200
@@ -169,6 +169,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -298,6 +299,7 @@
   Concepts:
   
    bundlespec    Bundle File Formats
+   evolution     Safely rewriting history (EXPERIMENTAL)
    glossary      Glossary
    phases        Working with Phases
    subrepos      Subrepositories
@@ -2274,6 +2276,13 @@
   Environment Variables
   </td></tr>
   <tr><td>
+  <a href="/help/evolution">
+  evolution
+  </a>
+  </td><td>
+  Safely rewriting history (EXPERIMENTAL)
+  </td></tr>
+  <tr><td>
   <a href="/help/extensions">
   extensions
   </a>
--- a/tests/test-hgweb-json.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-hgweb-json.t	Mon May 03 18:55:19 2021 +0200
@@ -2272,6 +2272,10 @@
         "topic": "environment"
       },
       {
+        "summary": "Safely rewriting history (EXPERIMENTAL)",
+        "topic": "evolution"
+      },
+      {
         "summary": "Using Additional Features",
         "topic": "extensions"
       },
--- a/tests/test-manifest.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-manifest.py	Mon May 03 18:55:19 2021 +0200
@@ -81,12 +81,12 @@
         raise NotImplementedError('parsemanifest not implemented by test case')
 
     def testEmptyManifest(self):
-        m = self.parsemanifest(EMTPY_MANIFEST)
+        m = self.parsemanifest(20, EMTPY_MANIFEST)
         self.assertEqual(0, len(m))
         self.assertEqual([], list(m))
 
     def testManifest(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
         self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
         self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
@@ -95,20 +95,16 @@
         with self.assertRaises(KeyError):
             m[b'wat']
 
-    def testManifestLongHashes(self):
-        m = self.parsemanifest(b'a\0' + b'f' * 64 + b'\n')
-        self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
-
     def testSetItem(self):
         want = BIN_HASH_1
 
-        m = self.parsemanifest(EMTPY_MANIFEST)
+        m = self.parsemanifest(20, EMTPY_MANIFEST)
         m[b'a'] = want
         self.assertIn(b'a', m)
         self.assertEqual(want, m[b'a'])
         self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
 
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         m[b'a'] = want
         self.assertEqual(want, m[b'a'])
         self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
@@ -116,14 +112,14 @@
     def testSetFlag(self):
         want = b'x'
 
-        m = self.parsemanifest(EMTPY_MANIFEST)
+        m = self.parsemanifest(20, EMTPY_MANIFEST)
         # first add a file; a file-less flag makes no sense
         m[b'a'] = BIN_HASH_1
         m.setflag(b'a', want)
         self.assertEqual(want, m.flags(b'a'))
         self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
 
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         # first add a file; a file-less flag makes no sense
         m[b'a'] = BIN_HASH_1
         m.setflag(b'a', want)
@@ -133,7 +129,7 @@
         )
 
     def testCopy(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         m[b'a'] = BIN_HASH_1
         m2 = m.copy()
         del m
@@ -142,7 +138,7 @@
     def testCompaction(self):
         unhex = binascii.unhexlify
         h1, h2 = unhex(HASH_1), unhex(HASH_2)
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         m[b'alpha'] = h1
         m[b'beta'] = h2
         del m[b'foo']
@@ -164,7 +160,7 @@
             m[b'foo']
 
     def testMatchException(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
 
         def filt(path):
@@ -177,7 +173,7 @@
             m._matches(match)
 
     def testRemoveItem(self):
-        m = self.parsemanifest(A_SHORT_MANIFEST)
+        m = self.parsemanifest(20, A_SHORT_MANIFEST)
         del m[b'foo']
         with self.assertRaises(KeyError):
             m[b'foo']
@@ -193,9 +189,9 @@
         addl = b'z-only-in-left\0' + HASH_1 + b'\n'
         addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
         left = self.parsemanifest(
-            A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
+            20, A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
         )
-        right = self.parsemanifest(A_SHORT_MANIFEST + addr)
+        right = self.parsemanifest(20, A_SHORT_MANIFEST + addr)
         want = {
             b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
             b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
@@ -208,14 +204,18 @@
             b'foo': (MISSING, (BIN_HASH_3, b'x')),
             b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
         }
-        self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
+        self.assertEqual(
+            want, self.parsemanifest(20, EMTPY_MANIFEST).diff(left)
+        )
 
         want = {
             b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
             b'foo': ((BIN_HASH_3, b'x'), MISSING),
             b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
         }
-        self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
+        self.assertEqual(
+            want, left.diff(self.parsemanifest(20, EMTPY_MANIFEST))
+        )
         copy = right.copy()
         del copy[b'z-only-in-right']
         del right[b'foo']
@@ -225,7 +225,7 @@
         }
         self.assertEqual(want, right.diff(copy))
 
-        short = self.parsemanifest(A_SHORT_MANIFEST)
+        short = self.parsemanifest(20, A_SHORT_MANIFEST)
         pruned = short.copy()
         del pruned[b'foo']
         want = {
@@ -247,27 +247,27 @@
             l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
         )
         try:
-            self.parsemanifest(backwards)
+            self.parsemanifest(20, backwards)
             self.fail('Should have raised ValueError')
         except ValueError as v:
             self.assertIn('Manifest lines not in sorted order.', str(v))
 
     def testNoTerminalNewline(self):
         try:
-            self.parsemanifest(A_SHORT_MANIFEST + b'wat')
+            self.parsemanifest(20, A_SHORT_MANIFEST + b'wat')
             self.fail('Should have raised ValueError')
         except ValueError as v:
             self.assertIn('Manifest did not end in a newline.', str(v))
 
     def testNoNewLineAtAll(self):
         try:
-            self.parsemanifest(b'wat')
+            self.parsemanifest(20, b'wat')
             self.fail('Should have raised ValueError')
         except ValueError as v:
             self.assertIn('Manifest did not end in a newline.', str(v))
 
     def testHugeManifest(self):
-        m = self.parsemanifest(A_HUGE_MANIFEST)
+        m = self.parsemanifest(20, A_HUGE_MANIFEST)
         self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
         self.assertEqual(len(m), len(list(m)))
 
@@ -275,7 +275,7 @@
         """Tests matches() for a few specific files to make sure that both
         the set of files as well as their flags and nodeids are correct in
         the resulting manifest."""
-        m = self.parsemanifest(A_HUGE_MANIFEST)
+        m = self.parsemanifest(20, A_HUGE_MANIFEST)
 
         match = matchmod.exact([b'file1', b'file200', b'file300'])
         m2 = m._matches(match)
@@ -291,7 +291,7 @@
         """Tests matches() for a small set of specific files, including one
         nonexistent file to make sure it only matches against existing files.
         """
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.exact(
             [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
@@ -305,7 +305,7 @@
     def testMatchesNonexistentDirectory(self):
         """Tests matches() for a relpath match on a directory that doesn't
         actually exist."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(
             util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
@@ -316,7 +316,7 @@
 
     def testMatchesExactLarge(self):
         """Tests matches() for files matching a large list of exact files."""
-        m = self.parsemanifest(A_HUGE_MANIFEST)
+        m = self.parsemanifest(20, A_HUGE_MANIFEST)
 
         flist = m.keys()[80:300]
         match = matchmod.exact(flist)
@@ -326,7 +326,7 @@
 
     def testMatchesFull(self):
         '''Tests matches() for what should be a full match.'''
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
         m2 = m._matches(match)
@@ -336,7 +336,7 @@
     def testMatchesDirectory(self):
         """Tests matches() on a relpath match on a directory, which should
         match against all files within said directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(
             util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
@@ -362,7 +362,7 @@
         """Tests matches() on an exact match on a directory, which should
         result in an empty manifest because you can't perform an exact match
         against a directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.exact([b'a/b'])
         m2 = m._matches(match)
@@ -372,7 +372,7 @@
     def testMatchesCwd(self):
         """Tests matches() on a relpath match with the current directory ('.')
         when not in the root directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(
             util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
@@ -397,7 +397,7 @@
     def testMatchesWithPattern(self):
         """Tests matches() for files matching a pattern that reside
         deeper than the specified directory."""
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
         m2 = m._matches(match)
@@ -408,8 +408,12 @@
 
 
 class testmanifestdict(unittest.TestCase, basemanifesttests):
-    def parsemanifest(self, text):
-        return manifestmod.manifestdict(text)
+    def parsemanifest(self, nodelen, text):
+        return manifestmod.manifestdict(nodelen, text)
+
+    def testManifestLongHashes(self):
+        m = self.parsemanifest(32, b'a\0' + b'f' * 64 + b'\n')
+        self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
 
     def testObviouslyBogusManifest(self):
         # This is a 163k manifest that came from oss-fuzz. It was a
@@ -433,15 +437,15 @@
             b'\xac\xbe'
         )
         with self.assertRaises(ValueError):
-            self.parsemanifest(data)
+            self.parsemanifest(20, data)
 
 
 class testtreemanifest(unittest.TestCase, basemanifesttests):
-    def parsemanifest(self, text):
+    def parsemanifest(self, nodelen, text):
         return manifestmod.treemanifest(sha1nodeconstants, b'', text)
 
     def testWalkSubtrees(self):
-        m = self.parsemanifest(A_DEEPER_MANIFEST)
+        m = self.parsemanifest(20, A_DEEPER_MANIFEST)
 
         dirs = [s._dir for s in m.walksubtrees()]
         self.assertEqual(
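(Throughout test-manifest.py, parsemanifest() gains a leading nodelen argument:
a manifestdict must now be told the binary width of the node hashes it holds,
20 bytes for SHA-1, which is also why testManifestLongHashes moved into the
manifestdict-specific subclass and passes 32. A minimal sketch of both widths,
assuming the manifest module is imported as manifestmod as in this test;
m20 and m32 are placeholder names.)

    # Sketch only: manifestdict(nodelen, text) with 20-byte (SHA-1) and
    # 32-byte nodes, mirroring testManifest and testManifestLongHashes above.
    import binascii
    from mercurial import manifest as manifestmod

    # 20-byte node: 40 hex digits per manifest entry
    m20 = manifestmod.manifestdict(20, b'a\0' + b'f' * 40 + b'\n')
    assert m20[b'a'] == binascii.unhexlify(b'f' * 40)

    # 32-byte node: 64 hex digits, as in testManifestLongHashes
    m32 = manifestmod.manifestdict(32, b'a\0' + b'f' * 64 + b'\n')
    assert m32[b'a'] == binascii.unhexlify(b'f' * 64)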
--- a/tests/test-merge-subrepos.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-merge-subrepos.t	Mon May 03 18:55:19 2021 +0200
@@ -61,7 +61,7 @@
   > --config blackbox.track='command commandfinish'
   9bfe45a197d7+ tip
   $ cat .hg/blackbox.log
-  * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --cmdserver chgunix * (glob) (chg !)
+  * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
   * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* (glob)
   * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* exited 0 * (glob)
 
--- a/tests/test-narrow-clone-non-narrow-server.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-narrow-clone-non-narrow-server.t	Mon May 03 18:55:19 2021 +0200
@@ -57,6 +57,7 @@
   comparing with http://localhost:$HGPORT1/
   searching for changes
   looking for local changes to affected paths
+  deleting unwanted files from working copy
 
   $ hg tracked --addinclude f1 http://localhost:$HGPORT1/
   nothing to widen or narrow
--- a/tests/test-narrow-patterns.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-narrow-patterns.t	Mon May 03 18:55:19 2021 +0200
@@ -193,6 +193,7 @@
   deleting data/dir1/dirA/bar.i (reporevlogstore !)
   deleting data/dir1/dirA/bar/0eca1d0cbdaea4651d1d04d71976a6d2d9bfaae5 (reposimplestore !)
   deleting data/dir1/dirA/bar/index (reposimplestore !)
+  deleting unwanted files from working copy
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
@@ -249,6 +250,7 @@
   deleting data/dir1/dirA/foo.i (reporevlogstore !)
   deleting data/dir1/dirA/foo/162caeb3d55dceb1fee793aa631ac8c73fcb8b5e (reposimplestore !)
   deleting data/dir1/dirA/foo/index (reposimplestore !)
+  deleting unwanted files from working copy
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
--- a/tests/test-narrow-share.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-narrow-share.t	Mon May 03 18:55:19 2021 +0200
@@ -94,6 +94,7 @@
   deleting meta/d1/00manifest.i (tree !)
   deleting meta/d3/00manifest.i (tree !)
   deleting meta/d5/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ hg -R main tracked
   I path:d7
   $ hg -R main files
--- a/tests/test-narrow-trackedcmd.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-narrow-trackedcmd.t	Mon May 03 18:55:19 2021 +0200
@@ -150,6 +150,7 @@
   looking for local changes to affected paths
   deleting data/inside/f.i
   deleting meta/inside/00manifest.i (tree !)
+  deleting unwanted files from working copy
   saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
   adding changesets
   adding manifests
@@ -191,6 +192,7 @@
   looking for local changes to affected paths
   deleting data/widest/f.i
   deleting meta/widest/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:outisde
   I path:wider
--- a/tests/test-narrow.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-narrow.t	Mon May 03 18:55:19 2021 +0200
@@ -132,12 +132,14 @@
   looking for local changes to affected paths
   The following changeset(s) or their ancestors have local changes not on the remote:
   * (glob)
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d0/f.i (reporevlogstore !)
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
 
   $ hg log -T "{rev}: {desc} {outsidenarrow}\n"
   7: local change to d3 
@@ -164,12 +166,14 @@
   comparing with ssh://user@dummy/master
   searching for changes
   looking for local changes to affected paths
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d0/f.i (reporevlogstore !)
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
 
 Updates off of stripped commit if necessary
   $ hg co -r 'desc("local change to d3")' -q
@@ -183,12 +187,14 @@
   * (glob)
   * (glob)
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d3/f.i (reporevlogstore !)
   deleting meta/d3/00manifest.i (tree !)
   deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
   deleting data/d3/f/99fa7136105a15e2045ce3d9152e4837c5349e4d (reposimplestore !)
   deleting data/d3/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg log -T '{desc}\n' -r .
   add d10/f
 Updates to nullid if necessary
@@ -206,12 +212,14 @@
   The following changeset(s) or their ancestors have local changes not on the remote:
   * (glob)
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d3/f.i (reporevlogstore !)
   deleting meta/d3/00manifest.i (tree !)
   deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
   deleting data/d3/f/5ce0767945cbdbca3b924bb9fbf5143f72ab40ac (reposimplestore !)
   deleting data/d3/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg id
   000000000000
   $ cd ..
@@ -272,6 +280,7 @@
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   $ hg files
   [1]
@@ -332,6 +341,7 @@
   deleting meta/d6/00manifest.i (tree !)
   deleting data/d6/f/7339d30678f451ac8c3f38753beeb4cf2e1655c7 (reposimplestore !)
   deleting data/d6/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d0
   I path:d3
@@ -355,6 +365,7 @@
   deleting data/d3/f.i (reporevlogstore !)
   deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
   deleting data/d3/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d0
   I path:d3
@@ -378,6 +389,7 @@
   deleting meta/d0/00manifest.i (tree !)
   deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
   deleting data/d0/f/index (reposimplestore !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d3
   I path:d9
@@ -478,11 +490,13 @@
   path:d2
   remove these unused includes (yn)? y
   looking for local changes to affected paths
+  moving unwanted changesets to backup
   saved backup bundle to $TESTTMP/narrow-auto-remove/.hg/strip-backup/*-narrow.hg (glob)
   deleting data/d0/f.i
   deleting data/d2/f.i
   deleting meta/d0/00manifest.i (tree !)
   deleting meta/d2/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ hg tracked
   I path:d1
   $ hg files
@@ -504,10 +518,12 @@
   path:d2
   remove these unused includes (yn)? y
   looking for local changes to affected paths
+  deleting unwanted changesets
   deleting data/d0/f.i
   deleting data/d2/f.i
   deleting meta/d0/00manifest.i (tree !)
   deleting meta/d2/00manifest.i (tree !)
+  deleting unwanted files from working copy
   $ ls .hg/strip-backup/
 
 
@@ -521,4 +537,5 @@
   looking for local changes to affected paths
   deleting data/d0/f.i
   deleting meta/d0/00manifest.i (tree !)
+  deleting unwanted files from working copy
   not deleting possibly dirty file d0/f
--- a/tests/test-obshistory.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-obshistory.t	Mon May 03 18:55:19 2021 +0200
@@ -13,6 +13,7 @@
   > [experimental]
   > evolution.createmarkers = yes
   > evolution.effect-flags = yes
+  > evolution.allowdivergence=true
   > EOF
 
 Test output on amended commit
--- a/tests/test-obsmarker-template.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-obsmarker-template.t	Mon May 03 18:55:19 2021 +0200
@@ -11,6 +11,7 @@
   > publish=False
   > [experimental]
   > evolution=true
+  > evolution.allowdivergence=true
   > [templates]
   > obsfatesuccessors = "{if(successors, " as ")}{join(successors, ", ")}"
   > obsfateverb = "{obsfateverb(successors, markers)}"
--- a/tests/test-parseindex2.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-parseindex2.py	Mon May 03 18:55:19 2021 +0200
@@ -14,8 +14,8 @@
 from mercurial.node import (
     bin,
     hex,
-    nullid,
     nullrev,
+    sha1nodeconstants,
 )
 from mercurial import (
     policy,
@@ -40,7 +40,7 @@
     s = 64
     cache = None
     index = []
-    nodemap = {nullid: nullrev}
+    nodemap = {sha1nodeconstants.nullid: nullrev}
     n = off = 0
 
     l = len(data) - s
@@ -227,7 +227,7 @@
 
         ix = parsers.parse_index2(data_inlined, True)[0]
         for i, r in enumerate(ix):
-            if r[7] == nullid:
+            if r[7] == sha1nodeconstants.nullid:
                 i = -1
             try:
                 self.assertEqual(
@@ -240,7 +240,7 @@
                 break
 
     def testminusone(self):
-        want = (0, 0, 0, -1, -1, -1, -1, nullid)
+        want = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
         index, junk = parsers.parse_index2(data_inlined, True)
         got = index[-1]
         self.assertEqual(want, got)  # inline data
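(Where no repository object is available, as in this pure-Python index parser
and in the remotefilelog pack tests further down, the tests now take the null
node from sha1nodeconstants in mercurial.node instead of the removed
module-level nullid. A minimal sketch of the substitution, using only names
the hunks above already import.)

    # Sketch only: the null node now comes from sha1nodeconstants.
    from mercurial.node import nullrev, sha1nodeconstants

    # seed a nodemap the way the pure parser above does
    nodemap = {sha1nodeconstants.nullid: nullrev}

    # the "empty" index entry checked by testminusone above
    want = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
    assert len(sha1nodeconstants.nullid) == 20  # SHA-1 hash width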
--- a/tests/test-rebase-collapse.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-rebase-collapse.t	Mon May 03 18:55:19 2021 +0200
@@ -550,7 +550,7 @@
   
   $ hg rebase --collapse -r 1 -d 0
   abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  (see 'hg help evolution.instability')
   [10]
 
 Test collapsing in place
--- a/tests/test-rebase-scenario-global.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-rebase-scenario-global.t	Mon May 03 18:55:19 2021 +0200
@@ -453,7 +453,7 @@
   $ cd ah1
   $ hg rebase -r '2::8' -d 1
   abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '2::8' -d 1 -k
   rebasing 2:c9e50f6cdc55 "C"
@@ -499,7 +499,7 @@
   $ cd ah2
   $ hg rebase -r '3::8' -d 1
   abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '3::8' -d 1 --keep
   rebasing 3:ffd453c31098 "D"
@@ -542,7 +542,7 @@
   $ cd ah3
   $ hg rebase -r '3::7' -d 1
   abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '3::7' -d 1 --keep
   rebasing 3:ffd453c31098 "D"
@@ -582,7 +582,7 @@
   $ cd ah4
   $ hg rebase -r '3::(7+5)' -d 1
   abort: cannot rebase changeset with children
-  (use --keep to keep original changesets)
+  (see 'hg help evolution.instability')
   [10]
   $ hg rebase -r '3::(7+5)' -d 1 --keep
   rebasing 3:ffd453c31098 "D"
--- a/tests/test-remotefilelog-datapack.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-remotefilelog-datapack.py	Mon May 03 18:55:19 2021 +0200
@@ -16,7 +16,7 @@
 
 # Load the local remotefilelog, not the system one
 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
 from mercurial import policy
 
 if not policy._packageprefs.get(policy.policy, (False, False))[1]:
@@ -63,7 +63,14 @@
 
     def createPack(self, revisions=None, packdir=None):
         if revisions is None:
-            revisions = [(b"filename", self.getFakeHash(), nullid, b"content")]
+            revisions = [
+                (
+                    b"filename",
+                    self.getFakeHash(),
+                    sha1nodeconstants.nullid,
+                    b"content",
+                )
+            ]
 
         if packdir is None:
             packdir = self.makeTempDir()
@@ -86,7 +93,7 @@
         filename = b"foo"
         node = self.getHash(content)
 
-        revisions = [(filename, node, nullid, content)]
+        revisions = [(filename, node, sha1nodeconstants.nullid, content)]
         pack = self.createPack(revisions)
         if self.paramsavailable:
             self.assertEqual(
@@ -126,7 +133,7 @@
         """Test putting multiple delta blobs into a pack and read the chain."""
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             content = b"abcdef%d" % i
             node = self.getHash(content)
@@ -157,7 +164,7 @@
             for j in range(random.randint(1, 100)):
                 content = b"content-%d" % j
                 node = self.getHash(content)
-                lastnode = nullid
+                lastnode = sha1nodeconstants.nullid
                 if len(filerevs) > 0:
                     lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
                 filerevs.append(node)
@@ -185,7 +192,9 @@
                 b'Z': b'random_string',
                 b'_': b'\0' * i,
             }
-            revisions.append((filename, node, nullid, content, meta))
+            revisions.append(
+                (filename, node, sha1nodeconstants.nullid, content, meta)
+            )
         pack = self.createPack(revisions)
         for name, node, x, content, origmeta in revisions:
             parsedmeta = pack.getmeta(name, node)
@@ -198,7 +207,7 @@
         """Test the getmissing() api."""
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             content = b"abcdef%d" % i
             node = self.getHash(content)
@@ -225,7 +234,7 @@
         pack = self.createPack()
 
         try:
-            pack.add(b'filename', nullid, b'contents')
+            pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
             self.assertTrue(False, "datapack.add should throw")
         except RuntimeError:
             pass
@@ -264,7 +273,9 @@
             content = filename
             node = self.getHash(content)
             blobs[(filename, node)] = content
-            revisions.append((filename, node, nullid, content))
+            revisions.append(
+                (filename, node, sha1nodeconstants.nullid, content)
+            )
 
         pack = self.createPack(revisions)
         if self.paramsavailable:
@@ -288,7 +299,12 @@
 
         for i in range(numpacks):
             chain = []
-            revision = (b'%d' % i, self.getFakeHash(), nullid, b"content")
+            revision = (
+                b'%d' % i,
+                self.getFakeHash(),
+                sha1nodeconstants.nullid,
+                b"content",
+            )
 
             for _ in range(revisionsperpack):
                 chain.append(revision)
@@ -346,7 +362,9 @@
                 filename = b"filename-%d" % i
                 content = b"content-%d" % i
                 node = self.getHash(content)
-                revisions.append((filename, node, nullid, content))
+                revisions.append(
+                    (filename, node, sha1nodeconstants.nullid, content)
+                )
 
             path = self.createPack(revisions).path
 
--- a/tests/test-remotefilelog-histpack.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-remotefilelog-histpack.py	Mon May 03 18:55:19 2021 +0200
@@ -13,7 +13,7 @@
 
 import silenttestrunner
 
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
 from mercurial import (
     pycompat,
     ui as uimod,
@@ -59,8 +59,8 @@
                 (
                     b"filename",
                     self.getFakeHash(),
-                    nullid,
-                    nullid,
+                    sha1nodeconstants.nullid,
+                    sha1nodeconstants.nullid,
                     self.getFakeHash(),
                     None,
                 )
@@ -119,10 +119,19 @@
         """
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             node = self.getFakeHash()
-            revisions.append((filename, node, lastnode, nullid, nullid, None))
+            revisions.append(
+                (
+                    filename,
+                    node,
+                    lastnode,
+                    sha1nodeconstants.nullid,
+                    sha1nodeconstants.nullid,
+                    None,
+                )
+            )
             lastnode = node
 
         # revisions must be added in topological order, newest first
@@ -148,17 +157,17 @@
         for i in range(100):
             filename = b"filename-%d" % i
             entries = []
-            p2 = nullid
-            linknode = nullid
+            p2 = sha1nodeconstants.nullid
+            linknode = sha1nodeconstants.nullid
             for j in range(random.randint(1, 100)):
                 node = self.getFakeHash()
-                p1 = nullid
+                p1 = sha1nodeconstants.nullid
                 if len(entries) > 0:
                     p1 = entries[random.randint(0, len(entries) - 1)]
                 entries.append(node)
                 revisions.append((filename, node, p1, p2, linknode, None))
                 allentries[(filename, node)] = (p1, p2, linknode)
-                if p1 == nullid:
+                if p1 == sha1nodeconstants.nullid:
                     ancestorcounts[(filename, node)] = 1
                 else:
                     newcount = ancestorcounts[(filename, p1)] + 1
@@ -182,10 +191,19 @@
     def testGetNodeInfo(self):
         revisions = []
         filename = b"foo"
-        lastnode = nullid
+        lastnode = sha1nodeconstants.nullid
         for i in range(10):
             node = self.getFakeHash()
-            revisions.append((filename, node, lastnode, nullid, nullid, None))
+            revisions.append(
+                (
+                    filename,
+                    node,
+                    lastnode,
+                    sha1nodeconstants.nullid,
+                    sha1nodeconstants.nullid,
+                    None,
+                )
+            )
             lastnode = node
 
         pack = self.createPack(revisions)
@@ -233,7 +251,14 @@
         pack = self.createPack()
 
         try:
-            pack.add(b'filename', nullid, nullid, nullid, nullid, None)
+            pack.add(
+                b'filename',
+                sha1nodeconstants.nullid,
+                sha1nodeconstants.nullid,
+                sha1nodeconstants.nullid,
+                sha1nodeconstants.nullid,
+                None,
+            )
             self.assertTrue(False, "historypack.add should throw")
         except RuntimeError:
             pass
--- a/tests/test-revlog-raw.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-revlog-raw.py	Mon May 03 18:55:19 2021 +0200
@@ -6,7 +6,6 @@
 import hashlib
 import sys
 
-from mercurial.node import nullid
 from mercurial import (
     encoding,
     revlog,
@@ -93,7 +92,7 @@
     """
     nextrev = len(rlog)
     p1 = rlog.node(nextrev - 1)
-    p2 = nullid
+    p2 = rlog.nullid
     if isext:
         flags = revlog.REVIDX_EXTSTORED
     else:
@@ -127,7 +126,7 @@
     class dummychangegroup(object):
         @staticmethod
         def deltachunk(pnode):
-            pnode = pnode or nullid
+            pnode = pnode or rlog.nullid
             parentrev = rlog.rev(pnode)
             r = parentrev + 1
             if r >= len(rlog):
@@ -142,7 +141,7 @@
             return {
                 b'node': rlog.node(r),
                 b'p1': pnode,
-                b'p2': nullid,
+                b'p2': rlog.nullid,
                 b'cs': rlog.node(rlog.linkrev(r)),
                 b'flags': rlog.flags(r),
                 b'deltabase': rlog.node(deltaparent),
@@ -183,7 +182,7 @@
     dlog = newrevlog(destname, recreate=True)
     for r in rlog:
         p1 = rlog.node(r - 1)
-        p2 = nullid
+        p2 = rlog.nullid
         if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
             text = rlog.rawdata(r)
             cachedelta = None
--- a/tests/test-setdiscovery.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-setdiscovery.t	Mon May 03 18:55:19 2021 +0200
@@ -1536,7 +1536,7 @@
   searching for changes
   101 102 103 104 105 106 107 108 109 110  (no-eol)
   $ hg -R r1 --config extensions.blackbox= blackbox --config blackbox.track=
-  * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --cmdserver chgunix * (glob) (chg !)
+  * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
   * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* (glob)
   * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> found 101 common and 1 unknown server heads, 1 roundtrips in *.????s (glob)
   * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* exited 0 after *.?? seconds (glob)
--- a/tests/test-single-head.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-single-head.t	Mon May 03 18:55:19 2021 +0200
@@ -65,6 +65,9 @@
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ mkcommit c_dD0
   created new head
+  $ hg log -r 'heads(::branch("default"))' -T '{node|short}\n'
+  286d02a6e2a2
+  9bf953aa81f6
   $ hg push -f
   pushing to $TESTTMP/single-head-server
   searching for changes
--- a/tests/test-split.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-split.t	Mon May 03 18:55:19 2021 +0200
@@ -467,6 +467,7 @@
 #if obsstore-off
   $ runsplit -r 1 --no-rebase
   abort: cannot split changeset with children
+  (see 'hg help evolution.instability')
   [10]
 #else
   $ runsplit -r 1 --no-rebase >/dev/null
@@ -518,6 +519,7 @@
   $ rm .hg/localtags
   $ hg split $B --config experimental.evolution=createmarkers
   abort: cannot split changeset with children
+  (see 'hg help evolution.instability')
   [10]
   $ cat > $TESTTMP/messages <<EOF
   > Split B
--- a/tests/test-unamend.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-unamend.t	Mon May 03 18:55:19 2021 +0200
@@ -6,6 +6,7 @@
   > glog = log -G -T '{rev}:{node|short}  {desc}'
   > [experimental]
   > evolution = createmarkers, allowunstable
+  > evolution.allowdivergence = true
   > [extensions]
   > rebase =
   > amend =
@@ -284,6 +285,7 @@
 
   $ hg --config experimental.evolution=createmarkers unamend
   abort: cannot unamend changeset with children
+  (see 'hg help evolution.instability')
   [10]
 
   $ hg unamend
--- a/tests/test-uncommit.t	Sat May 01 00:28:39 2021 -0400
+++ b/tests/test-uncommit.t	Mon May 03 18:55:19 2021 +0200
@@ -51,7 +51,7 @@
 Uncommit with no commits should fail
 
   $ hg uncommit
-  abort: cannot uncommit null changeset
+  abort: cannot uncommit the null revision
   (no changeset checked out)
   [10]
 
@@ -410,7 +410,7 @@
   [20]
 
   $ hg uncommit --config experimental.uncommitondirtywdir=True
-  abort: cannot uncommit while merging
+  abort: cannot uncommit changesets while merging
   [20]
 
   $ hg status
--- a/tests/testlib/ext-sidedata.py	Sat May 01 00:28:39 2021 -0400
+++ b/tests/testlib/ext-sidedata.py	Mon May 03 18:55:19 2021 +0200
@@ -10,10 +10,7 @@
 import hashlib
 import struct
 
-from mercurial.node import (
-    nullid,
-    nullrev,
-)
+from mercurial.node import nullrev
 from mercurial import (
     extensions,
     requirements,
@@ -46,7 +43,7 @@
         return text, sd
     if self.version & 0xFFFF != 2:
         return text, sd
-    if nodeorrev != nullrev and nodeorrev != nullid:
+    if nodeorrev != nullrev and nodeorrev != self.nullid:
         cat1 = sd.get(sidedata.SD_TEST1)
         if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
             raise RuntimeError('text size mismatch')